mirror of https://git.coolaj86.com/coolaj86/proxy-packer.js.git synced 2025-04-21 20:20:37 +00:00

Compare commits


35 Commits

Author SHA1 Message Date
85dad9d458 Update 'README.md' 2019-09-21 22:43:15 +00:00
2b763f8606 v2.0.4: make prettier 2019-09-03 09:21:08 -06:00
e699c44480 v2.0.3: bugfix invalid access of socket._handle 2019-09-03 09:19:33 -06:00
7da7746a31 don't assume socket._handle exists 2019-09-03 09:14:47 -06:00
bcd332fea1 v2.0.2: handle undefined error message string 2018-08-12 02:39:24 -06:00
d8dd3b32b4 v2.0.1b: bump version in package.json 2018-08-08 22:40:06 -06:00
9241604639 v2.0.1: use Buffer.from() instead of new Buffer() 2018-08-08 01:22:56 -06:00
ece89be3dd v2.0.0: add connection event, fix 0-byte data parser issue 2018-08-07 03:44:15 -06:00
ff93145be2 v1.5.0: handle connection event and dataless events 2018-08-07 01:48:05 -06:00
5524b7dcac update docs with connection event and other clarifications 2018-08-07 01:46:42 -06:00
2e4e73e48b add connection event and packHeader function 2018-08-07 01:26:59 -06:00
178bd67375 update protocol section 2018-08-06 23:50:35 -06:00
AJ ONeal b8c423edca leave the shackles behind 2018-05-31 05:35:49 -06:00
AJ ONeal 407e7c21c6 add localPort as serviceport 2018-05-31 05:13:27 -06:00
AJ ONeal 6ed367d3d7 more efficient (?) remotePort poker 2018-05-31 05:09:48 -06:00
AJ ONeal 7f3a5b4f04 explain control messages 2018-05-30 22:59:58 -06:00
AJ ONeal ca885876d2 note 'error' service 2018-05-30 22:52:39 -06:00
AJ ONeal 6b2b9607ec update docs and cleanup files 2018-05-30 22:48:32 -06:00
AJ ONeal 175286e791 v1.4.2 2018-04-28 19:46:32 -06:00
AJ ONeal ae2ad20059 v1.4.2 2018-04-28 19:44:09 -06:00
AJ ONeal 91965622b3 name change 2018-04-28 19:29:56 -06:00
AJ ONeal 6ae1cddcfc v1.4.1 2018-04-28 19:28:54 -06:00
AJ ONeal 74e7cc0fc7 v1.3.1 2018-04-28 19:25:46 -06:00
AJ ONeal 727d06c5d8 v1.2.1 2018-04-28 19:24:29 -06:00
AJ ONeal 57f8c3a88e v1.2.0 2018-04-28 19:24:13 -06:00
AJ ONeal 9f18e08bda update urls 2018-04-28 19:10:42 -06:00
AJ ONeal 47829f9fa8 use the actual packer, not just the algorithm 2017-02-15 23:15:17 -07:00
AJ ONeal 05220405c6 add row counter header to hexdumps 2017-02-15 23:06:50 -07:00
AJ ONeal a40c6e5a68 add test for checking implementations 2017-02-15 23:04:56 -07:00
AJ ONeal 05aa40db12 merge 2017-01-03 00:07:41 -07:00
AJ ONeal dd24c6bbae auto-update banner 2016-12-30 02:41:16 -07:00
AJ ONeal f1c0888302 auto-update ad 2016-12-30 02:23:09 -07:00
7aee6ae977 Update README.md 2016-12-30 02:48:02 +00:00
AJ ONeal 016c3d2779 Update README.md 2016-11-29 17:19:54 -07:00
AJ ONeal 0d4a2aef01 Update README.md 2016-11-25 10:38:27 -07:00
10 changed files with 1099 additions and 505 deletions

293
README.md

@ -1,38 +1,277 @@
# proxy-packer | a [Root](https://rootprojects.org) project

"The M-PROXY Protocol" for node.js

A strategy for packing and unpacking multiplexed streams.
<small>Where you have distinct clients on one side trying to reach distinct servers on the other.</small>

```
Browser <--\                       /--> Device
Browser <---- M-PROXY Service ----> Device
Browser <--/                       \--> Device
```

<small>Many clients may connect to a single device. A single client may connect to many devices.</small>

It's the kind of thing you'd use to build a poor man's VPN, or port-forward router.

# The M-PROXY Protocol
This is similar to "The PROXY Protocol" (a la HAProxy), but designed for multiplexed tls, http, tcp, and udp
tunneled over arbitrary streams (such as WebSockets).
It also has a backchannel for communicating with the proxy itself.
Each message has a header with a socket identifier (family, addr, port), and may have additional information.
```
<version><headerlen><family>,<address>,<port>,<datalen>,<service>,<port>,<name>
```
```
<254><45>IPv4,127.0.1.1,4321,199,https,443,example.com
```
```
version (8 bits) 254 is version 1
header length (8 bits) the remaining length of the header before data begins
These values are used to identify a specific client among many
socket family (string) the IPv4 or IPv6 connection from a client
socket address (string) the x.x.x.x remote address of the client
socket port (string) the 1-65535 source port of the remote client
data length (string) the number of bytes in the wrapped packet, in case the network re-chunks the packet
These optional values can be very useful at the start of a new connection
service name (string) Either a standard service name (port + protocol), such as 'https'
as listed in /etc/services, otherwise 'tls', 'tcp', or 'udp' for generics
Also used for messages with the proxy (i.e. authentication)
* 'control' for proxy<->server messages, including authentication, health, etc
* 'connection' for a specific client
* 'error' for a specific client
* 'pause' to pause upload to a specific client (not the whole tunnel)
* 'resume' to resume upload to a specific client (not the whole tunnel)
service port (string) The listening port, such as 443. Useful for non-standard or dynamic services.
host or server name (string) Useful for services that can be routed by name, such as http, https, smtp, and dns.
```
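For example, the header shown above can be framed by hand (a sketch of the framing rule only, not a library call); the only binary pieces are the version byte (255 minus the version number) and the header length:

```js
var version = 1;
var body = Buffer.alloc(199); // stand-in for a 199-byte wrapped packet
var header = ['IPv4', '127.0.1.1', '4321', body.byteLength, 'https', '443', 'example.com'].join(',');
var frame = Buffer.concat([
    Buffer.from([255 - version, header.length]), // <254><45>
    Buffer.from(header),                         // the comma-separated fields above
    body                                         // the wrapped packet, byte-for-byte
]);
```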
## Tunneled TCP SNI Packet
You should see that the result is simply all of the original packet with a leading header.
Note that `16 03 01 00` starts at the 29th byte (at index 28 or 0x1C) instead of at index 0:
```
0 1 2 3 4 5 6 7 8 9 A B C D E F
0000000 fe 1a 49 50 76 34 2c 31 32 37 2e 30 2e 31 2e 31 <-- 0xfe = v1, 0x1a = 26 more bytes for header
0000010 2c 34 34 33 2c 31 39 39 2c 66 6f 6f
16 03 01 00 <-- first 4 bytes of tcp packet
0000020 c2 01 00 00 be 03 03 57 e3 76 50 66 03 df 99 76
0000030 24 c8 31 e6 e8 08 34 6b b4 7b bb 2c f3 17 aa 5c
0000040 ec 09 da da 83 5a b2 00 00 56 00 ff c0 24 c0 23
0000050 c0 0a c0 09 c0 08 c0 28 c0 27 c0 14 c0 13 c0 12
0000060 c0 26 c0 25 c0 05 c0 04 c0 03 c0 2a c0 29 c0 0f
0000070 c0 0e c0 0d 00 6b 00 67 00 39 00 33 00 16 00 3d
0000080 00 3c 00 35 00 2f 00 0a c0 07 c0 11 c0 02 c0 0c
0000090 00 05 00 04 00 af 00 ae 00 8d 00 8c 00 8a 00 8b
00000a0 01 00 00 3f 00 00 00 19 00 17 00 00 14 70 6f 6b
00000b0 65 6d 61 70 2e 68 65 6c 6c 61 62 69 74 2e 63 6f
00000c0 6d 00 0a 00 08 00 06 00 17 00 18 00 19 00 0b 00
00000d0 02 01 00 00 0d 00 0c 00 0a 05 01 04 01 02 01 04
00000e0 03 02 03
00000e3
```
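To sanity-check the first two bytes of that dump by hand (a quick sketch, nothing more):

```js
var head = Buffer.from([0xfe, 0x1a]);
console.log(255 - head[0]); // 1  -> protocol version
console.log(head[1]);       // 26 -> header bytes that follow: "IPv4,127.0.1.1,443,199,foo"
```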
The v1 header uses strings for address and service descriptor information,
but future versions may be binary.
# API
```js
var Packer = require('proxy-packer');
```
## Unpacker / Parser State Machine
The unpacker creates a state machine.
Each data chunk going in must be in sequence (tcp guarantees this),
composing a full message with header and data (unless data length is 0).
The state machine progresses through these states:
- version
- headerLength
- header
- data
At the end of the data event (which may or may not contain a buffer of data)
one of the appropriate handlers will be called.
- control
- connection
- message
- pause
- resume
- end
- error
```js
unpacker = Packer.create(handlers); // Create a state machine for unpacking
unpacker.fns.addChunk(chunk); // process a chunk of data
handlers.oncontrol = function(tun) {}; // for communicating with the proxy
// tun.data is an array
// '[ -1, "[Error] bad hello" ]'
// '[ 0, "[Error] out-of-band error message" ]'
// '[ 1, "hello", 254, [ "add_token", "delete_token" ] ]'
// '[ 1, "add_token" ]'
// '[ 1, "delete_token" ]'
handlers.onconnection = function(tun) {}; // a client has established a connection
handlers.onmessage = function(tun) {}; // a client has sent a message
// tun = { family, address, port, data
// , service, serviceport, name };
handlers.onpause = function(tun) {}; // proxy requests to pause upload to a client
// tun = { family, address, port };
handlers.onresume = function(tun) {}; // proxy requests to resume upload to a client
// tun = { family, address, port };
handlers.onend = function(tun) {}; // proxy requests to close a client's socket
// tun = { family, address, port };
handlers.onerror = function(err) {}; // proxy is relaying a client's error
// err = { message, family, address, port };
```
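As a minimal usage sketch (assumptions noted, not from the README itself): the tunnel is assumed to arrive over a plain TCP socket, and each complete framed message fires one handler:

```js
'use strict';
var net = require('net');
var Packer = require('proxy-packer');

var unpacker = Packer.create({
    onmessage: function (tun) {
        console.log('data from', Packer.addrToId(tun), '(' + tun.data.byteLength + ' bytes)');
    },
    onend: function (tun) {
        console.log('end of', Packer.addrToId(tun));
    },
    onerror: function (err) {
        console.error('tunneled error:', err.message);
    }
});

// The carrier is just TCP here; it could equally be a WebSocket stream.
net.createServer(function (tunnelSocket) {
    tunnelSocket.on('data', function (chunk) {
        unpacker.fns.addChunk(chunk); // feed raw chunks; handlers fire per complete message
    });
}).listen(3000);
```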
<!--
TODO
handlers.onconnect = function (tun) { } // a new client has connected
-->
## Packer & Extras
Packs header metadata about the connection into a buffer (optionally together with the original data), ready to send.
```js
var headerAndBody = Packer.pack(tun, data); // Add M-PROXY header to data
// tun = { family, address, port
// , service, serviceport, name }
var headerBuf = Packer.packHeader(tun, data); // Same as above, but creates a buffer for header only
// (data can be converted to a buffer or sent as-is)
var addr = Packer.socketToAddr(socket); // Probe raw, raw socket for address info
var id = Packer.addrToId(address); // Turn M-PROXY address info into a deterministic id
var id = Packer.socketToId(socket); // Turn raw, raw socket info into a deterministic id
```
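A sending-side sketch (assumptions: `tunnelStream` is the multiplexed carrier and the `'https'` service hint is known out of band):

```js
var Packer = require('proxy-packer');

function forwardChunk(clientSocket, tunnelStream, chunk) {
    var tun = Packer.socketToAddr(clientSocket); // { family, address, port, serviceport }
    tun.service = 'https';                       // hint at what the client is speaking
    tunnelStream.write(Packer.pack(tun, chunk)); // header + original bytes, ready to send
}
```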
## API Helpers
```js
var socket = Packer.Stream.wrapSocket(socketOrStream); // workaround for https://github.com/nodejs/node/issues/8854
// which was just closed recently, but probably still needs
// something more like this (below) to work as intended
// https://github.com/findhit/proxywrap/blob/master/lib/proxywrap.js
```
```js
var myTransform = Packer.Transform.create({
    address: {
        family: '...',
        address: '...',
        port: '...'
    },
    // hint at the service to be used
    service: 'https'
});
```
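In use (a sketch; `client` is assumed to be the raw client socket and `tunnel` the multiplexed carrier), the transform stamps an M-PROXY header onto every chunk that passes through:

```js
client.pipe(myTransform).pipe(tunnel); // each chunk is framed by _transform before writing
```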
# Testing an implementation
If you want to write a compatible packer, just make sure that for any given input
you get the same output as the packer does.
```bash
node test/pack.js input.json output.bin
hexdump output.bin
```
Where `input.json` looks something like this:
`input.json`:
```
{ "version": 1
, "address": {
"family": "IPv4"
, "address": "127.0.1.1"
, "port": 4321
, "service": "foo"
, "serviceport": 443
, "name": 'example.com'
}
, "filepath": "./sni.tcp.bin"
}
```
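One way to compare outputs (a sketch; `myPack` stands in for your own implementation and is not part of this repo):

```js
var fs = require('fs');
var assert = require('assert');

var input = JSON.parse(fs.readFileSync('input.json', 'utf8'));
var packet = fs.readFileSync('./sni.tcp.bin');
var reference = fs.readFileSync('output.bin'); // produced by `node test/pack.js ...`

var mine = myPack(input.address, packet); // your packer's header + body framing
assert.ok(reference.equals(mine), 'packed output differs from the reference');
```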
## Raw TCP SNI Packet
and `sni.tcp.bin` is any captured tcp packet, such as this one with a tls hello:
`sni.tcp.bin`:
```
0 1 2 3 4 5 6 7 8 9 A B C D E F
0000000 16 03 01 00 c2 01 00 00 be 03 03 57 e3 76 50 66
0000010 03 df 99 76 24 c8 31 e6 e8 08 34 6b b4 7b bb 2c
0000020 f3 17 aa 5c ec 09 da da 83 5a b2 00 00 56 00 ff
0000030 c0 24 c0 23 c0 0a c0 09 c0 08 c0 28 c0 27 c0 14
0000040 c0 13 c0 12 c0 26 c0 25 c0 05 c0 04 c0 03 c0 2a
0000050 c0 29 c0 0f c0 0e c0 0d 00 6b 00 67 00 39 00 33
0000060 00 16 00 3d 00 3c 00 35 00 2f 00 0a c0 07 c0 11
0000070 c0 02 c0 0c 00 05 00 04 00 af 00 ae 00 8d 00 8c
0000080 00 8a 00 8b 01 00 00 3f 00 00 00 19 00 17 00 00
0000090 14 70 6f 6b 65 6d 61 70 2e 68 65 6c 6c 61 62 69
00000a0 74 2e 63 6f 6d 00 0a 00 08 00 06 00 17 00 18 00
00000b0 19 00 0b 00 02 01 00 00 0d 00 0c 00 0a 05 01 04
00000c0 01 02 01 04 03 02 03
00000c7
```
## Tunneled TCP SNI Packet
You should see that the result is simply all of the original packet with a leading header.
Note that `16 03 01 00` starts at the 29th byte (at index 28 or 0x1C) instead of at index 0:
```
0 1 2 3 4 5 6 7 8 9 A B C D E F
0000000 fe 1a 49 50 76 34 2c 31 32 37 2e 30 2e 31 2e 31 <-- 0xfe = v1, 0x1a = 26 more bytes for header
0000010 2c 34 34 33 2c 31 39 39 2c 66 6f 6f
16 03 01 00 <-- first 4 bytes of tcp packet
0000020 c2 01 00 00 be 03 03 57 e3 76 50 66 03 df 99 76
0000030 24 c8 31 e6 e8 08 34 6b b4 7b bb 2c f3 17 aa 5c
0000040 ec 09 da da 83 5a b2 00 00 56 00 ff c0 24 c0 23
0000050 c0 0a c0 09 c0 08 c0 28 c0 27 c0 14 c0 13 c0 12
0000060 c0 26 c0 25 c0 05 c0 04 c0 03 c0 2a c0 29 c0 0f
0000070 c0 0e c0 0d 00 6b 00 67 00 39 00 33 00 16 00 3d
0000080 00 3c 00 35 00 2f 00 0a c0 07 c0 11 c0 02 c0 0c
0000090 00 05 00 04 00 af 00 ae 00 8d 00 8c 00 8a 00 8b
00000a0 01 00 00 3f 00 00 00 19 00 17 00 00 14 70 6f 6b
00000b0 65 6d 61 70 2e 68 65 6c 6c 61 62 69 74 2e 63 6f
00000c0 6d 00 0a 00 08 00 06 00 17 00 18 00 19 00 0b 00
00000d0 02 01 00 00 0d 00 0c 00 0a 05 01 04 01 02 01 04
00000e0 03 02 03
00000e3
```

816
index.js

@ -2,371 +2,525 @@
var Packer = module.exports; var Packer = module.exports;
Packer.create = function (opts) { var serviceEvents = {
var machine; default: 'tunnelData',
connection: 'tunnelConnection',
if (!opts.onMessage && !opts.onmessage) { control: 'tunnelControl',
machine = new (require('events').EventEmitter)(); error: 'tunnelError',
} else { end: 'tunnelEnd',
machine = {}; pause: 'tunnelPause',
} resume: 'tunnelResume'
};
machine.onMessage = opts.onmessage || opts.onMessage; var serviceFuncs = {
machine.onmessage = opts.onmessage || opts.onMessage; default: 'onmessage',
machine.onError = opts.onerror || opts.onError; connection: 'onconnection',
machine.onerror = opts.onerror || opts.onError; control: 'oncontrol',
machine.onEnd = opts.onend || opts.onEnd; error: 'onerror',
machine.onend = opts.onend || opts.onEnd; end: 'onend',
pause: 'onpause',
machine._version = 1; resume: 'onresume'
machine.state = 0;
machine.states = { 0: 'version', 1: 'headerLength', 2: 'header', 3: 'data'/*, 4: 'error'*/ };
machine.states_length = Object.keys(machine.states).length;
machine.chunkIndex = 0;
machine.fns = {};
machine.fns.version = function (chunk) {
//console.log('');
//console.log('[version]');
if ((255 - machine._version) !== chunk[machine.chunkIndex]) {
console.error("not v" + machine._version + " (or data is corrupt)");
// no idea how to fix this yet
}
machine.chunkIndex += 1;
return true;
};
machine.headerLen = 0;
machine.fns.headerLength = function (chunk) {
//console.log('');
//console.log('[headerLength]');
machine.headerLen = chunk[machine.chunkIndex];
machine.chunkIndex += 1;
return true;
};
machine.buf = null;
machine.bufIndex = 0;
//var buf = Buffer.alloc(4096);
machine.fns.header = function (chunk) {
//console.log('');
//console.log('[header]');
var curSize = machine.bufIndex + (chunk.length - machine.chunkIndex);
var partLen = 0;
var str = '';
var part;
if (curSize < machine.headerLen) {
// I still don't have the whole header,
// so just create a large enough buffer,
// write these bits, and wait for the next chunk.
if (!machine.buf) {
machine.buf = Buffer.alloc(machine.headerLen);
}
// partLen should be no more than the available size
partLen = Math.min(machine.headerLen - machine.bufIndex, chunk.length - machine.chunkIndex);
part = chunk.slice(machine.chunkIndex, machine.chunkIndex + partLen);
chunk.copy(machine.buf, machine.bufIndex, machine.chunkIndex, machine.chunkIndex + partLen);
machine.chunkIndex += partLen; // this MUST be chunk.length
machine.bufIndex += partLen;
return false;
}
else {
// it's now ready to discover the whole header
if (machine.buf) {
str += machine.buf.slice(0, machine.bufIndex).toString();
}
partLen = machine.headerLen - str.length;
part = chunk.slice(machine.chunkIndex, machine.chunkIndex + partLen);
str += part.toString();
machine.chunkIndex += partLen;
machine.buf = null; // back to null
machine.bufIndex = 0; // back to 0
machine._headers = str.split(/,/g);
machine.family = machine._headers[0];
machine.address = machine._headers[1];
machine.port = machine._headers[2];
machine.bodyLen = parseInt(machine._headers[3], 10) || -1;
machine.service = machine._headers[4];
//console.log('machine.service', machine.service);
return true;
}
};
machine.fns.data = function (chunk) {
//console.log('');
//console.log('[data]');
var curSize = machine.bufIndex + (chunk.length - machine.chunkIndex);
//console.log('curSize:', curSize);
//console.log('bodyLen:', machine.bodyLen, typeof machine.bodyLen);
var partLen = 0;
var msg;
var data;
partLen = Math.min(machine.bodyLen - machine.bufIndex, chunk.length - machine.chunkIndex);
if (curSize < machine.bodyLen) {
//console.log('curSize < bodyLen');
// I still don't have the whole header,
// so just create a large enough buffer,
// write these bits, and wait for the next chunk.
if (!machine.buf) {
machine.buf = Buffer.alloc(machine.bodyLen);
}
chunk.copy(machine.buf, machine.bufIndex, machine.chunkIndex, machine.chunkIndex + partLen);
machine.chunkIndex += partLen; // this MUST be chunk.length
machine.bufIndex += partLen;
return false;
}
if (machine.bufIndex > 0) {
// the completing remainder of the body is in the current slice
chunk.copy(machine.buf, machine.bufIndex, machine.chunkIndex, machine.chunkIndex + partLen);
}
else {
// the whole body is in the current slice
machine.buf = chunk.slice(machine.chunkIndex, machine.chunkIndex + partLen);
}
machine.bufIndex += partLen;
machine.service = machine.service;
data = machine.buf.slice(0, machine.bufIndex);
//console.log('machine.service', machine.service);
//
// data, end, error
//
if ('end' === machine.service) {
msg = {};
msg.family = machine.family;
msg.address = machine.address;
msg.port = machine.port;
msg.service = 'end';
msg.data = data;
if (machine.emit) {
machine.emit('tunnelEnd', msg);
}
else {
(machine.onend||machine.onmessage)(msg);
}
}
else if ('error' === machine.service) {
try {
msg = JSON.parse(machine.data.toString());
} catch(e) {
msg = new Error('unknown error');
}
msg.family = machine.family;
msg.address = machine.address;
msg.port = machine.port;
msg.service = 'error';
msg.data = data;
if (machine.emit) {
machine.emit('tunnelError', msg);
}
else {
(machine.onerror||machine.onmessage)(msg);
}
}
else {
msg = {};
msg.family = machine.family;
msg.address = machine.address;
msg.port = machine.port;
msg.service = machine.service;
msg.data = data;
if (machine.emit) {
machine.emit('tunnelData', msg);
}
else {
machine.onmessage(msg);
}
}
machine.chunkIndex += partLen; // === chunk.length
machine.buf = null; // reset to null
machine.bufIndex = 0; // reset to 0
return true;
};
machine.fns.addChunk = function (chunk) {
//console.log('');
//console.log('[addChunk]');
machine.chunkIndex = 0;
while (machine.chunkIndex < chunk.length) {
//console.log('chunkIndex:', machine.chunkIndex, 'state:', machine.state);
if (true === machine.fns[machine.states[machine.state]](chunk)) {
machine.state += 1;
machine.state %= machine.states_length;
}
}
};
return machine;
}; };
Packer.pack = function (address, data, service) { Packer.create = function(opts) {
data = data || Buffer.alloc(1); var machine;
if (!data.byteLength) {
data = Buffer.alloc(1);
}
if ('error' === service) { if (!opts.onMessage && !opts.onmessage) {
address.service = 'error'; machine = new (require('events')).EventEmitter();
} } else {
else if ('end' === service) { machine = {};
address.service = 'end'; }
}
var version = 1; machine.onmessage = opts.onmessage || opts.onMessage;
var header = Buffer.from([ machine.oncontrol = opts.oncontrol || opts.onControl;
/*servername,*/ address.family, address.address, address.port, data.byteLength machine.onconnection =
, (address.service || '') opts.onconnection || opts.onConnection || function() {};
].join(',')); machine.onerror = opts.onerror || opts.onError;
var meta = Buffer.from([ 255 - version, header.length ]); machine.onend = opts.onend || opts.onEnd;
var buf = Buffer.alloc(meta.byteLength + header.byteLength + data.byteLength); machine.onpause = opts.onpause || opts.onPause;
machine.onresume = opts.onresume || opts.onResume;
meta.copy(buf, 0, 0, meta.byteLength); machine._version = 1;
header.copy(buf, 2, 0, header.byteLength); machine.fns = {};
data.copy(buf, 2 + header.byteLength, 0, data.byteLength);
return buf; machine.chunkIndex = 0;
machine.buf = null;
machine.bufIndex = 0;
machine.fns.collectData = function(chunk, size) {
var chunkLeft = chunk.length - machine.chunkIndex;
var hasLen = size > 0;
if (!hasLen) {
return Buffer.alloc(0);
}
// First handle case where we don't have all the data we need yet. We need to save
// what we have in a buffer, and increment the index for both the buffer and the chunk.
if (machine.bufIndex + chunkLeft < size) {
if (!machine.buf) {
machine.buf = Buffer.alloc(size);
}
chunk.copy(machine.buf, machine.bufIndex, machine.chunkIndex);
machine.bufIndex += chunkLeft;
machine.chunkIndex += chunkLeft;
return null;
}
// Read and mark as read however much data we need from the chunk to complete our buffer.
var partLen = size - machine.bufIndex;
var part = chunk.slice(
machine.chunkIndex,
machine.chunkIndex + partLen
);
machine.chunkIndex += partLen;
// If we had nothing buffered than the part of the chunk we just read is all we need.
if (!machine.buf) {
return part;
}
// Otherwise we need to copy the new data into the buffer.
part.copy(machine.buf, machine.bufIndex);
// Before returning the buffer we need to clear our reference to it.
var buf = machine.buf;
machine.buf = null;
machine.bufIndex = 0;
return buf;
};
machine.fns.version = function(chunk) {
//console.log('');
//console.log('[version]');
if (255 - machine._version !== chunk[machine.chunkIndex]) {
console.error('not v' + machine._version + ' (or data is corrupt)');
// no idea how to fix this yet
}
machine.chunkIndex += 1;
return true;
};
machine.headerLen = 0;
machine.fns.headerLength = function(chunk) {
//console.log('');
//console.log('[headerLength]');
machine.headerLen = chunk[machine.chunkIndex];
machine.chunkIndex += 1;
return true;
};
machine.fns.header = function(chunk) {
//console.log('');
//console.log('[header]');
var header = machine.fns.collectData(chunk, machine.headerLen);
// We don't have the entire header yet so return false.
if (!header) {
return false;
}
machine._headers = header.toString().split(/,/g);
machine.family = machine._headers[0];
machine.address = machine._headers[1];
machine.port = machine._headers[2];
machine.bodyLen = parseInt(machine._headers[3], 10) || 0;
machine.service = machine._headers[4];
machine.serviceport = machine._headers[5];
machine.name = machine._headers[6];
machine.servicename = machine._headers[7];
//console.log('machine.service', machine.service);
return true;
};
machine.fns.data = function(chunk) {
//console.log('');
//console.log('[data]');
var data;
// The 'connection' event may not have a body
// Other events may not have a body either
if (machine.bodyLen) {
data = machine.fns.collectData(chunk, machine.bodyLen);
// We don't have the entire body yet so return false.
if (!data) {
return false;
}
}
//
// data, end, error
//
var msg = {};
if ('error' === machine.service) {
try {
msg = JSON.parse(data.toString());
} catch (e) {
msg.message = 'e:' + JSON.stringify(data);
msg.code = 'E_UNKNOWN_ERR';
}
}
msg.family = machine.family;
msg.address = machine.address;
msg.port = machine.port;
msg.service = machine.service;
msg.serviceport = machine.serviceport;
msg.name = machine.name;
msg.data = data;
if ('connection' === machine.service) {
msg.service = machine.servicename;
}
//console.log('msn', machine.service);
if (machine.emit) {
machine.emit(
serviceEvents[machine.service] ||
serviceEvents[msg.service] ||
serviceEvents.default
);
} else {
(machine[serviceFuncs[machine.service]] ||
machine[serviceFuncs[msg.service]] ||
machine[serviceFuncs.default])(msg);
}
return true;
};
machine.state = 0;
machine.states = ['version', 'headerLength', 'header', 'data'];
machine.fns.addChunk = function(chunk) {
//console.log('');
//console.log('[addChunk]');
machine.chunkIndex = 0;
while (machine.chunkIndex < chunk.length) {
//console.log('chunkIndex:', machine.chunkIndex, 'state:', machine.state);
if (true === machine.fns[machine.states[machine.state]](chunk)) {
machine.state += 1;
machine.state %= machine.states.length;
}
}
if ('data' === machine.states[machine.state] && 0 === machine.bodyLen) {
machine.fns[machine.states[machine.state]](chunk);
machine.state += 1;
machine.state %= machine.states.length;
}
};
return machine;
}; };
Packer.socketToAddr = function (socket) { Packer.packHeader = function(meta, data, service, andBody, oldways) {
// TODO BUG XXX if (oldways && !data) {
// https://github.com/nodejs/node/issues/8854 data = Buffer.from(' ');
// tlsSocket.remoteAddress = remoteAddress; // causes core dump }
// console.log(tlsSocket.remoteAddress); if (data && !Buffer.isBuffer(data)) {
data = Buffer.from(JSON.stringify(data));
}
if (oldways && !data.byteLength) {
data = Buffer.from(' ');
}
return { if (service && -1 === ['control', 'connection'].indexOf(service)) {
family: //console.log('end?', service);
socket.remoteFamily meta.service = service;
|| socket._remoteFamily }
|| socket._handle._parentWrap.remoteFamily
|| socket._handle._parentWrap._handle.owner.stream.remoteFamily var size = (data && data.byteLength) || 0;
, address: var sizeReserve = andBody ? size : 0;
socket.remoteAddress var version = 1;
|| socket._remoteAddress var header;
|| socket._handle._parentWrap.remoteAddress if (service === 'control') {
|| socket._handle._parentWrap._handle.owner.stream.remoteAddress header = Buffer.from(['', '', '', size, service].join(','));
, port: } else if (service === 'connection') {
socket.remotePort header = Buffer.from(
|| socket._remotePort [
|| socket._handle._parentWrap.remotePort meta.family,
|| socket._handle._parentWrap._handle.owner.stream.remotePort meta.address,
}; meta.port,
size,
'connection',
meta.serviceport || '',
meta.name || '',
meta.service || ''
].join(',')
);
} else {
header = Buffer.from(
[
meta.family,
meta.address,
meta.port,
size,
meta.service || '',
meta.serviceport || '',
meta.name || ''
].join(',')
);
}
var metaBuf = Buffer.from([255 - version, header.length]);
var buf = Buffer.alloc(
metaBuf.byteLength + header.byteLength + sizeReserve
);
metaBuf.copy(buf, 0);
header.copy(buf, 2);
if (sizeReserve) {
data.copy(buf, 2 + header.byteLength);
}
return buf;
};
Packer.pack = function(meta, data, service) {
return Packer.packHeader(meta, data, service, true, true);
}; };
Packer.addrToId = function (address) { function extractSocketProps(socket, propNames) {
return address.family + ',' + address.address + ',' + address.port; var props = {};
if (socket.remotePort) {
propNames.forEach(function(propName) {
props[propName] = socket[propName];
});
} else if (socket._remotePort) {
propNames.forEach(function(propName) {
props[propName] = socket['_' + propName];
});
} else if (socket._handle) {
if (
socket._handle._parent &&
socket._handle._parent.owner &&
socket._handle._parent.owner.stream &&
socket._handle._parent.owner.stream.remotePort
) {
propNames.forEach(function(propName) {
props[propName] = socket._handle._parent.owner.stream[propName];
});
} else if (
socket._handle._parentWrap &&
socket._handle._parentWrap.remotePort
) {
propNames.forEach(function(propName) {
props[propName] = socket._handle._parentWrap[propName];
});
} else if (
socket._handle._parentWrap &&
socket._handle._parentWrap._handle &&
socket._handle._parentWrap._handle.owner &&
socket._handle._parentWrap._handle.owner.stream &&
socket._handle._parentWrap._handle.owner.stream.remotePort
) {
propNames.forEach(function(propName) {
props[propName] =
socket._handle._parentWrap._handle.owner.stream[propName];
});
}
}
return props;
}
function extractSocketProp(socket, propName) {
// remoteAddress, remotePort... ugh... https://github.com/nodejs/node/issues/8854
var value = socket[propName] || socket['_' + propName];
try {
value = value || socket._handle._parent.owner.stream[propName];
} catch (e) {}
try {
value = value || socket._handle._parentWrap[propName];
value =
value || socket._handle._parentWrap._handle.owner.stream[propName];
} catch (e) {}
return value || '';
}
Packer.socketToAddr = function(socket) {
// TODO BUG XXX
// https://github.com/nodejs/node/issues/8854
// tlsSocket.remoteAddress = remoteAddress; // causes core dump
// console.log(tlsSocket.remoteAddress);
var props = extractSocketProps(socket, [
'remoteFamily',
'remoteAddress',
'remotePort',
'localPort'
]);
return {
family: props.remoteFamily,
address: props.remoteAddress,
port: props.remotePort,
serviceport: props.localPort
};
}; };
Packer.socketToId = function (socket) { Packer.addrToId = function(address) {
return Packer.addrToId(Packer.socketToAddr(socket)); return address.family + ',' + address.address + ',' + address.port;
}; };
Packer.socketToId = function(socket) {
return Packer.addrToId(Packer.socketToAddr(socket));
};
var addressNames = [
'remoteAddress',
'remotePort',
'remoteFamily',
'localAddress',
'localPort'
];
/* /*
* var sockFuncs = [
* Tunnel Packer 'address'
* , 'destroy'
*/ , 'ref'
, 'unref'
, 'setEncoding'
, 'setKeepAlive'
, 'setNoDelay'
, 'setTimeout'
];
*/
// Unlike Packer.Stream.create this should handle all of the events needed to make everything work.
Packer.wrapSocket = function(socket) {
// node v10.2+ doesn't need a workaround for https://github.com/nodejs/node/issues/8854
addressNames.forEach(function(name) {
Object.defineProperty(socket, name, {
enumerable: false,
configurable: true,
get: function() {
return extractSocketProp(socket, name);
}
});
});
return socket;
// Improved workaround for https://github.com/nodejs/node/issues/8854
/*
// TODO use defineProperty to override remotePort, etc
var myDuplex = new require('stream').Duplex();
addressNames.forEach(function (name) {
myDuplex[name] = extractSocketProp(socket, name);
});
// Handle everything needed for the write part of the Duplex. We need to overwrite the
// `end` function because there is no other way to know when the other side calls it.
myDuplex._write = socket.write.bind(socket);
myDuplex.end = socket.end.bind(socket);
// Handle everything needed for the read part of the Duplex. See the example under
// https://nodejs.org/api/stream.html#stream_readable_push_chunk_encoding.
myDuplex._read = function () {
socket.resume();
};
socket.on('data', function (chunk) {
if (!myDuplex.push(chunk)) {
socket.pause();
}
});
socket.on('end', function () {
myDuplex.push(null);
});
// Handle the the things not directly related to reading or writing
socket.on('error', function (err) {
console.error('[error] wrapped socket errored - ' + err.toString());
myDuplex.emit('error', err);
});
socket.on('close', function () {
myDuplex.emit('close');
});
sockFuncs.forEach(function (name) {
if (typeof socket[name] !== 'function') {
console.warn('expected `'+name+'` to be a function on wrapped socket');
} else {
myDuplex[name] = socket[name].bind(socket);
}
});
return myDuplex;
*/
};
var Transform = require('stream').Transform; var Transform = require('stream').Transform;
var util = require('util'); var util = require('util');
function MyTransform(options) { function MyTransform(options) {
if (!(this instanceof MyTransform)) { if (!(this instanceof MyTransform)) {
return new MyTransform(options); return new MyTransform(options);
} }
this.__my_addr = options.address; this.__my_addr = options.address;
this.__my_service = options.service; this.__my_service = options.service;
Transform.call(this, options); this.__my_serviceport = options.serviceport;
this.__my_name = options.name;
Transform.call(this, options);
} }
util.inherits(MyTransform, Transform); util.inherits(MyTransform, Transform);
function transform(me, data, encoding, callback) {
var address = me.__my_addr;
address.service = address.service || me.__my_service; MyTransform.prototype._transform = function(data, encoding, callback) {
me.push(Packer.pack(address, data)); var address = this.__my_addr;
callback();
} address.service = address.service || this.__my_service;
MyTransform.prototype._transform = function (data, encoding, callback) { address.serviceport = address.serviceport || this.__my_serviceport;
return transform(this, data, encoding, callback); address.name = address.name || this.__my_name;
this.push(Packer.pack(address, data));
callback();
}; };
Packer.Stream = {}; Packer.Stream = {};
var Dup = { var Dup = {
write: function (chunk, encoding, cb) { write: function(chunk, encoding, cb) {
//console.log('_write', chunk.byteLength); //console.log('_write', chunk.byteLength);
this.__my_socket.write(chunk, encoding); this.__my_socket.write(chunk, encoding, cb);
cb(); },
} read: function(size) {
, read: function (size) { //console.log('_read');
//console.log('_read'); var x = this.__my_socket.read(size);
var x = this.__my_socket.read(size); if (x) {
if (x) { console.log('_read', size);
console.log('_read', size); this.push(x);
this.push(x); }
} }
}
}; };
Packer.Stream.create = function (socket) { Packer.Stream.create = function(socket) {
// Workaround for if (!Packer.Stream.warned) {
// https://github.com/nodejs/node/issues/8854 console.warn('`Stream.create` deprecated, use `wrapSocket` instead');
Packer.Stream.warned = true;
}
// https://www.google.com/#q=get+socket+address+from+file+descriptor // Workaround for
// TODO try creating a new net.Socket({ handle: socket._handle, fd: socket._handle.fd }) // https://github.com/nodejs/node/issues/8854
// from the old one and then adding back the data with
// sock.push(firstChunk)
var Duplex = require('stream').Duplex;
var myDuplex = new Duplex();
myDuplex.__my_socket = socket; // https://www.google.com/#q=get+socket+address+from+file+descriptor
myDuplex._write = Dup.write; // TODO try creating a new net.Socket({ handle: socket._handle, fd: socket._handle.fd })
myDuplex._read = Dup.read; // from the old one and then adding back the data with
//console.log('plainSocket.*Address'); // sock.push(firstChunk)
//console.log('remote:', socket.remoteAddress); var Duplex = require('stream').Duplex;
//console.log('local:', socket.localAddress); var myDuplex = new Duplex();
//console.log('address():', socket.address());
myDuplex.remoteFamily = socket.remoteFamily;
myDuplex.remoteAddress = socket.remoteAddress;
myDuplex.remotePort = socket.remotePort;
myDuplex.localFamily = socket.localFamily;
myDuplex.localAddress = socket.localAddress;
myDuplex.localPort = socket.localPort;
return myDuplex; myDuplex.__my_socket = socket;
myDuplex._write = Dup.write;
myDuplex._read = Dup.read;
//console.log('plainSocket.*Address');
//console.log('remote:', socket.remoteAddress);
//console.log('local:', socket.localAddress);
//console.log('address():', socket.address());
myDuplex.remoteFamily = socket.remoteFamily;
myDuplex.remoteAddress = socket.remoteAddress;
myDuplex.remotePort = socket.remotePort;
myDuplex.localFamily = socket.localFamily;
myDuplex.localAddress = socket.localAddress;
myDuplex.localPort = socket.localPort;
return myDuplex;
}; };
Packer.Transform = {}; Packer.Transform = {};
Packer.Transform.create = function (opts) { Packer.Transform.create = function(opts) {
// Note: service refers to the port that the incoming request was from, // Note: service refers to the port that the incoming request was from,
// if known (smtps, smtp, https, http, etc) // if known (smtps, smtp, https, http, etc)
// { address: '127.0.0.1', service: 'https' } // { address: '127.0.0.1', service: 'https' }
return new MyTransform(opts); return new MyTransform(opts);
}; };

package.json

@ -1,34 +1,40 @@
{
  "name": "proxy-packer",
  "version": "2.0.4",
  "description": "A strategy for packing and unpacking a proxy stream (i.e. packets through a tunnel). Handles multiplexed and tls connections. Used by telebit and telebitd.",
  "main": "index.js",
  "scripts": {
    "test": "node test.js"
  },
  "repository": {
    "type": "git",
    "url": "git+https://git.coolaj86.com/coolaj86/proxy-packer.js.git"
  },
  "keywords": [
    "tunnel",
    "telebit",
    "telebitd",
    "localtunnel",
    "ngrok",
    "underpass",
    "tcp",
    "sni",
    "https",
    "ssl",
    "tls",
    "http",
    "proxy",
    "pack",
    "unpack",
    "message",
    "msg",
    "packer",
    "unpacker"
  ],
  "author": "AJ ONeal <coolaj86@gmail.com> (https://coolaj86.com/)",
  "license": "(MIT OR Apache-2.0)",
  "bugs": {
    "url": "https://git.coolaj86.com/coolaj86/proxy-packer.js/issues"
  },
  "homepage": "https://git.coolaj86.com/coolaj86/proxy-packer.js#readme"
}

115
test.js

@ -1,115 +0,0 @@
'use strict';
var sni = require('sni');
var hello = require('fs').readFileSync('./sni.hello.bin');
var version = 1;
var address = {
family: 'IPv4'
, address: '127.0.1.1'
, port: 443
, service: 'foo'
};
var header = address.family + ',' + address.address + ',' + address.port + ',' + hello.byteLength
+ ',' + (address.service || '')
;
var buf = Buffer.concat([
Buffer.from([ 255 - version, header.length ])
, Buffer.from(header)
, hello
]);
var services = { 'ssh': 22, 'http': 4080, 'https': 8443 };
var clients = {};
var count = 0;
var packer = require('./');
var machine = packer.create({
onmessage: function (opts) {
var id = opts.family + ',' + opts.address + ',' + opts.port;
var service = 'https';
var port = services[service];
var servername = sni(opts.data);
console.log('');
console.log('[onMessage]');
if (!opts.data.equals(hello)) {
throw new Error("'data' packet is not equal to original 'hello' packet");
}
console.log('all', opts.data.byteLength, 'bytes are equal');
console.log('src:', opts.family, opts.address + ':' + opts.port);
console.log('dst:', 'IPv4 127.0.0.1:' + port);
if (!clients[id]) {
clients[id] = true;
if (!servername) {
throw new Error("no servername found for '" + id + "'");
}
console.log("servername: '" + servername + "'");
}
count += 1;
}
, onerror: function () {
throw new Error("Did not expect onerror");
}
, onend: function () {
throw new Error("Did not expect onend");
}
});
var packed = packer.pack(address, hello);
if (!packed.equals(buf)) {
console.error(buf.toString('hex') === packed.toString('hex'));
console.error(packed.toString('hex'), packed.byteLength);
console.error(buf.toString('hex'), buf.byteLength);
throw new Error("packer did not pack as expected");
}
console.log('');
// full message in one go
// 223 = 2 + 22 + 199
console.log('[WHOLE BUFFER]', 2, header.length, hello.length, buf.byteLength);
clients = {};
machine.fns.addChunk(buf);
console.log('');
// messages one byte at a time
console.log('[BYTE-BY-BYTE BUFFER]', 1);
clients = {};
buf.forEach(function (byte) {
machine.fns.addChunk(Buffer.from([ byte ]));
});
console.log('');
// split messages in overlapping thirds
// 0-2 (2)
// 2-24 (22)
// 24-223 (199)
// 223-225 (2)
// 225-247 (22)
// 247-446 (199)
buf = Buffer.concat([ buf, buf ]);
console.log('[OVERLAPPING BUFFERS]', buf.length);
clients = {};
[ buf.slice(0, 7) // version + header
, buf.slice(7, 14) // header
, buf.slice(14, 21) // header
, buf.slice(21, 28) // header + body
, buf.slice(28, 217) // body
, buf.slice(217, 224) // body + version
, buf.slice(224, 238) // version + header
, buf.slice(238, buf.byteLength) // header + body
].forEach(function (buf) {
machine.fns.addChunk(Buffer.from(buf));
});
console.log('');
process.on('exit', function () {
if (count !== 4) {
throw new Error("should have delivered 4 messages, not", count);
}
console.log('TESTS PASS');
console.log('');
});

11
test/input.json Normal file

@ -0,0 +1,11 @@
{
"version": 1,
"address": {
"family": "IPv4",
"address": "127.0.1.1",
"port": 4321,
"service": "https",
"serviceport": 443
},
"filepath": "./sni.hello.bin"
}

BIN
test/output.bin Normal file

Binary file not shown.

16
test/output.hexdump Normal file

@ -0,0 +1,16 @@
0000000 fe 1a 49 50 76 34 2c 31 32 37 2e 30 2e 31 2e 31
0000010 2c 34 34 33 2c 31 39 39 2c 66 6f 6f 16 03 01 00
0000020 c2 01 00 00 be 03 03 57 e3 76 50 66 03 df 99 76
0000030 24 c8 31 e6 e8 08 34 6b b4 7b bb 2c f3 17 aa 5c
0000040 ec 09 da da 83 5a b2 00 00 56 00 ff c0 24 c0 23
0000050 c0 0a c0 09 c0 08 c0 28 c0 27 c0 14 c0 13 c0 12
0000060 c0 26 c0 25 c0 05 c0 04 c0 03 c0 2a c0 29 c0 0f
0000070 c0 0e c0 0d 00 6b 00 67 00 39 00 33 00 16 00 3d
0000080 00 3c 00 35 00 2f 00 0a c0 07 c0 11 c0 02 c0 0c
0000090 00 05 00 04 00 af 00 ae 00 8d 00 8c 00 8a 00 8b
00000a0 01 00 00 3f 00 00 00 19 00 17 00 00 14 70 6f 6b
00000b0 65 6d 61 70 2e 68 65 6c 6c 61 62 69 74 2e 63 6f
00000c0 6d 00 0a 00 08 00 06 00 17 00 18 00 19 00 0b 00
00000d0 02 01 00 00 0d 00 0c 00 0a 05 01 04 01 02 01 04
00000e0 03 02 03
00000e3

55
test/pack.js Normal file

@ -0,0 +1,55 @@
(function() {
'use strict';
var fs = require('fs');
var infile = process.argv[2];
var outfile = process.argv[3];
var sni = require('sni');
if (!infile || !outfile) {
console.error('Usage:');
console.error('node test/pack.js test/input.json test/output.bin');
process.exit(1);
return;
}
var path = require('path');
var json = JSON.parse(fs.readFileSync(infile, 'utf8'));
var data = require('fs').readFileSync(
path.resolve(path.dirname(infile), json.filepath),
null
);
var Packer = require('../index.js');
var servername = sni(data);
var m = data.toString().match(/(?:^|[\r\n])Host: ([^\r\n]+)[\r\n]*/im);
var hostname = ((m && m[1].toLowerCase()) || '').split(':')[0];
/*
function pack() {
var version = json.version;
var address = json.address;
var header = address.family + ',' + address.address + ',' + address.port + ',' + data.byteLength
+ ',' + (address.service || '') + ',' + (address.serviceport || '') + ',' + (servername || hostname || '')
;
var buf = Buffer.concat([
Buffer.from([ 255 - version, header.length ])
, Buffer.from(header)
, data
]);
}
*/
json.address.name = servername || hostname;
var buf = Packer.pack(json.address, data);
fs.writeFileSync(outfile, buf, null);
console.log(
'wrote ' +
buf.byteLength +
" bytes to '" +
outfile +
"' ('hexdump " +
outfile +
"' to inspect)"
);
})();

228
test/parse.js Normal file

@ -0,0 +1,228 @@
'use strict';
var sni = require('sni');
var hello = require('fs').readFileSync(__dirname + '/sni.hello.bin');
var version = 1;
function getAddress() {
return {
family: 'IPv4',
address: '127.0.1.1',
port: 4321,
service: 'foo-https',
serviceport: 443,
name: 'foo-pokemap.hellabit.com'
};
}
var addr = getAddress();
var connectionHeader =
addr.family +
',' +
addr.address +
',' +
addr.port +
',0,connection,' +
(addr.serviceport || '') +
',' +
(addr.name || '') +
',' +
(addr.service || '');
var header =
addr.family +
',' +
addr.address +
',' +
addr.port +
',' +
hello.byteLength +
',' +
(addr.service || '') +
',' +
(addr.serviceport || '') +
',' +
(addr.name || '');
var endHeader =
addr.family +
',' +
addr.address +
',' +
addr.port +
',0,end,' +
(addr.serviceport || '') +
',' +
(addr.name || '');
var buf = Buffer.concat([
Buffer.from([255 - version, connectionHeader.length]),
Buffer.from(connectionHeader),
Buffer.from([255 - version, header.length]),
Buffer.from(header),
hello,
Buffer.from([255 - version, endHeader.length]),
Buffer.from(endHeader)
]);
var services = { ssh: 22, http: 4080, https: 8443 };
var clients = {};
var count = 0;
var packer = require('../');
var machine = packer.create({
onconnection: function(tun) {
console.info('');
if (!tun.service || 'connection' === tun.service) {
throw new Error('missing service: ' + JSON.stringify(tun));
}
console.info('[onConnection]');
count += 1;
},
onmessage: function(tun) {
//console.log('onmessage', tun);
var id = tun.family + ',' + tun.address + ',' + tun.port;
var service = 'https';
var port = services[service];
var servername = sni(tun.data);
console.info(
'[onMessage]',
service,
port,
servername,
tun.data.byteLength
);
if (!tun.data.equals(hello)) {
throw new Error(
"'data' packet is not equal to original 'hello' packet"
);
}
//console.log('all', tun.data.byteLength, 'bytes are equal');
//console.log('src:', tun.family, tun.address + ':' + tun.port + ':' + tun.serviceport);
//console.log('dst:', 'IPv4 127.0.0.1:' + port);
if (!clients[id]) {
clients[id] = true;
if (!servername) {
throw new Error("no servername found for '" + id + "'");
}
//console.log("servername: '" + servername + "'", tun.name);
}
count += 1;
},
onerror: function() {
throw new Error('Did not expect onerror');
},
onend: function() {
console.info('[onEnd]');
count += 1;
}
});
var packts, packed;
packts = [];
packts.push(packer.packHeader(getAddress(), null, 'connection'));
//packts.push(packer.pack(address, hello));
packts.push(packer.packHeader(getAddress(), hello));
packts.push(hello);
packts.push(packer.packHeader(getAddress(), null, 'end'));
packed = Buffer.concat(packts);
if (!packed.equals(buf)) {
console.error('');
console.error(buf.toString('hex') === packed.toString('hex'));
console.error('');
console.error('auto-packed:');
console.error(packed.toString('hex'), packed.byteLength);
console.error('');
console.error('hand-packed:');
console.error(buf.toString('hex'), buf.byteLength);
console.error('');
throw new Error('packer (new) did not pack as expected');
}
packts = [];
packts.push(packer.pack(getAddress(), null, 'connection'));
packts.push(packer.pack(getAddress(), hello));
//packts.push(packer.packHeader(getAddress(), hello));
//packts.push(hello);
packts.push(packer.pack(getAddress(), null, 'end'));
packed = Buffer.concat(packts);
// XXX TODO REMOVE
//
// Nasty fix for short-term backwards-compat
//
// In the old way of doing things we always have at least one byte
// of data (due to a parser bug which has now been fixed) and so
// there are two strings padded with a space which gives the
// data a length of 1 rather than 0
//
// Here all four of those instances are replaced, but it requires
// maching a few things on either side.
//
// Only 6 bytes are changed - two 1 => 0, four ' ' => ''
var hex = packed
.toString('hex')
//.replace(/2c313939/, '2c30')
.replace(/32312c312c636f/, '32312c302c636f')
.replace(/3332312c312c656e64/, '3332312c302c656e64')
.replace(/7320/, '73')
.replace(/20$/, '');
if (hex !== buf.toString('hex')) {
console.error('');
console.error(buf.toString('hex') === hex);
console.error('');
console.error('auto-packed:');
console.error(hex, packed.byteLength);
console.error('');
console.error('hand-packed:');
console.error(buf.toString('hex'), buf.byteLength);
console.error('');
throw new Error('packer (old) did not pack as expected');
}
console.info('');
// full message in one go
// 223 = 2 + 22 + 199
console.info('[WHOLE BUFFER]', 2, header.length, hello.length, buf.byteLength);
clients = {};
machine.fns.addChunk(buf);
console.info('');
// messages one byte at a time
console.info('[BYTE-BY-BYTE BUFFER]', 1);
clients = {};
buf.forEach(function(byte) {
machine.fns.addChunk(Buffer.from([byte]));
});
console.info('');
// split messages in overlapping thirds
// 0-2 (2)
// 2-24 (22)
// 24-223 (199)
// 223-225 (2)
// 225-247 (22)
// 247-446 (199)
buf = Buffer.concat([buf, buf]);
console.info('[OVERLAPPING BUFFERS]', buf.length);
clients = {};
[
buf.slice(0, 7), // version + header
buf.slice(7, 14), // header
buf.slice(14, 21), // header
buf.slice(21, 28), // header + body
buf.slice(28, 217), // body
buf.slice(217, 224), // body + version
buf.slice(224, 238), // version + header
buf.slice(238, buf.byteLength) // header + body
].forEach(function(buf) {
machine.fns.addChunk(Buffer.from(buf));
});
console.info('');
process.on('exit', function() {
if (count !== 12) {
throw new Error('should have delivered 12 messages, not ' + count);
}
console.info('TESTS PASS');
console.info('');
});