add dynamic tcp and cleanup
parent df3c1c3b04
commit b086e1c0a5
@@ -63,7 +63,7 @@ function applyConfig(config) {
   }
 
   function approveDomains(opts, certs, cb) {
-    console.log('[debug] approveDomains', opts.domains);
+    if (state.debug) { console.log('[debug] approveDomains', opts.domains); }
     // This is where you check your database and associated
     // email addresses with domains and agreements and such
 
@@ -75,11 +75,12 @@ function applyConfig(config) {
        return;
      }
 
-     if (state.config.vhost) {
-       console.log('[sni] vhost checking is turned on');
+     if (!state.validHosts) { state.validHosts = {}; }
+     if (!state.validHosts[opts.domains[0]] && state.config.vhost) {
+       if (state.debug) { console.log('[sni] vhost checking is turned on'); }
       var vhost = state.config.vhost.replace(/:hostname/, opts.domains[0]);
       require('fs').readdir(vhost, function (err, nodes) {
-        console.log('[sni] checking fs vhost');
+        if (state.debug) { console.log('[sni] checking fs vhost', opts.domains[0], !err); }
         if (err) { check(); return; }
         if (nodes) { approve(); }
       });
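The vhost approval above hinges on a filesystem convention: the ':hostname' placeholder in state.config.vhost is swapped for the SNI domain, and the domain is approved only if the resulting directory exists on disk. A minimal standalone sketch of that check (the template and domain here are hypothetical values, not from the commit):

    var fs = require('fs');

    var template = '/srv/www/:hostname';  // hypothetical vhost path template
    var domain = 'example.com';           // hypothetical SNI domain
    var vhost = template.replace(/:hostname/, domain);

    fs.readdir(vhost, function (err, nodes) {
      // a readable directory means a site exists for this domain, so a
      // certificate may be issued; otherwise fall back to the servername list
      if (err) { /* fall back to check() */ return; }
      if (nodes) { /* approve() */ }
    });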
@@ -87,8 +88,10 @@ function applyConfig(config) {
      }
 
      function approve() {
+       state.validHosts[opts.domains[0]] = true;
        opts.email = state.config.email;
        opts.agreeTos = state.config.agreeTos;
+       opts.communityMember = state.config.communityMember || state.config.greenlock.communityMember;
        opts.challenges = {
          // TODO dns-01
          'http-01': require('le-challenge-fs').create({ webrootPath: '/tmp/acme-challenges' })
@@ -98,41 +101,28 @@ function applyConfig(config) {
      }
 
      function check() {
-       console.log('[sni] checking servername');
+       if (state.debug) { console.log('[sni] checking servername'); }
        if (-1 !== state.servernames.indexOf(opts.domain) || -1 !== (state._servernames||[]).indexOf(opts.domain)) {
          approve();
        } else {
          cb(new Error("failed the approval chain '" + opts.domains[0] + "'"));
        }
-       console.log('Approve Domains cb');
      }
 
      check();
    }
 
-   /*
-   if (!config.email || !config.agreeTos) {
-     console.error("You didn't specify --email <EMAIL> and --agree-tos");
-     console.error("(required for ACME / Let's Encrypt / Greenlock TLS/SSL certs)");
-     console.error("");
-     process.exit(1);
-   }
-   */
 
    state.greenlock = Greenlock.create({
 
      version: state.config.greenlock.version || 'draft-11'
    , server: state.config.greenlock.server || 'https://acme-v02.api.letsencrypt.org/directory'
-   //, server: 'https://acme-staging-v02.api.letsencrypt.org/directory'
 
-   , store: require('le-store-certbot').create({ debug: true, webrootPath: '/tmp/acme-challenges' })
+   , store: require('le-store-certbot').create({ debug: state.config.debug || state.config.greenlock.debug, webrootPath: '/tmp/acme-challenges' })
 
    , approveDomains: approveDomains
 
    , telemetry: state.config.telemetry || state.config.greenlock.telemetry
    , configDir: state.config.greenlock.configDir
    , debug: state.config.debug || state.config.greenlock.debug
 
-   //, approvedDomains: program.servernames
 
    });
 
    require('../handlers').create(state); // adds directly to config for now...
@@ -147,14 +137,13 @@ function applyConfig(config) {
    wss.on('connection', netConnHandlers.ws);
    state.ports.forEach(function (port) {
      if (state.tcp[port]) {
-       console.error("skipping previously added port " + port);
+       console.warn("[cli] skipping previously added port " + port);
        return;
      }
      state.tcp[port] = net.createServer();
      state.tcp[port].listen(port, function () {
-       console.log('listening plain TCP on ' + port);
+       console.info('[cli] Listening for TCP connections on', port);
      });
-     //state.tcp[port].on('connection', function (conn) { netConnHandlers.tcp(conn, port); });
      state.tcp[port].on('connection', netConnHandlers.tcp);
    });
    //});
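The Greenlock options in this file are all pulled from state.config; read together they imply a config shape roughly like the following sketch. The field names come from the lines above, while the values are illustrative only:

    // hypothetical telebitd config fragment matching the fields referenced above
    var config = {
      email: 'admin@example.com'
    , agreeTos: true
    , communityMember: false
    , telemetry: false
    , debug: false
    , vhost: '/srv/www/:hostname'   // enables the filesystem vhost check
    , greenlock: {
        version: 'draft-11'         // ACME API draft supported by the CA
      , server: 'https://acme-v02.api.letsencrypt.org/directory'
      , configDir: '/etc/acme'      // illustrative path for certs and accounts
      , debug: false
      }
    };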
56 handlers.js
@@ -19,7 +19,7 @@ module.exports.create = function (state) {
   var setupTlsOpts = {
     SNICallback: function (servername, cb) {
       if (!setupSniCallback) {
-        console.error("No way to get https certificates...");
+        console.error("[setup.SNICallback] No way to get https certificates...");
         cb(new Error("telebitd sni setup fail"));
         return;
       }
@@ -29,7 +29,6 @@ module.exports.create = function (state) {
 
   // Probably a reverse proxy on an internal network (or ACME challenge)
   function notFound(req, res) {
-    console.log('req.socket.encrypted', req.socket.encrypted);
     res.statusCode = 404;
     res.end("File not found.\n");
   }
@@ -79,10 +78,10 @@ module.exports.create = function (state) {
     // tlsServer.emit('connection', socket); // this didn't work either
     //console.log('chunkLen', firstChunk.byteLength);
 
-    console.log('httpsInvalid servername', servername);
+    console.log('[httpsInvalid] servername', servername);
     //state.tlsInvalidSniServer.emit('connection', wrapSocket(socket));
     var tlsInvalidSniServer = tls.createServer(state.tlsOptions, function (tlsSocket) {
-      console.log('tls connection');
+      console.log('[tlsInvalid] tls connection');
       // things get a little messed up here
       var httpInvalidSniServer = http.createServer(function (req, res) {
         if (!servername) {
@@ -118,10 +117,8 @@ module.exports.create = function (state) {
   var serveAdmin = require('serve-static')(__dirname + '/admin', { redirect: true });
   var finalhandler = require('finalhandler');
   state.httpTunnelServer = http.createServer(function (req, res) {
-    console.log('admin req.socket.encrypted', req.socket.encrypted);
     res.setHeader('connection', 'close');
     serveAdmin(req, res, function () {
-      console.log("serveAdmin fail");
       finalhandler(req, res)
     });
   });
@@ -129,17 +126,13 @@ module.exports.create = function (state) {
     tunnelAdminTlsOpts[key] = state.tlsOptions[key];
   });
   if (state.greenlock && state.greenlock.tlsOptions) {
-    console.log('greenlock tlsOptions for SNICallback');
-    tunnelAdminTlsOpts.SNICallback = function (servername, cb) {
-      console.log("time to handle '" + servername + "'");
-      state.greenlock.tlsOptions.SNICallback(servername, cb);
-    };
+    tunnelAdminTlsOpts.SNICallback = state.greenlock.tlsOptions.SNICallback;
   } else {
-    console.log('custom or null tlsOptions for SNICallback');
+    console.log('[Admin] custom or null tlsOptions for SNICallback');
     tunnelAdminTlsOpts.SNICallback = tunnelAdminTlsOpts.SNICallback || noSniCallback('admin');
   }
   state.tlsTunnelServer = tls.createServer(tunnelAdminTlsOpts, function (tlsSocket) {
-    console.log('(Admin) tls connection');
+    if (state.debug) { console.log('[Admin] new tls-terminated connection'); }
     // things get a little messed up here
     (state.httpTunnelServer || state.httpServer).emit('connection', tlsSocket);
   });
@@ -152,7 +145,7 @@ module.exports.create = function (state) {
     // tlsServer.emit('connection', socket); // this didn't work either
     //console.log('chunkLen', firstChunk.byteLength);
 
-    console.log('httpsTunnel (Admin) servername', servername);
+    if (state.debug) { console.log('[Admin] new raw tls connection for', servername); }
     state.tlsTunnelServer.emit('connection', wrapSocket(socket));
   };
 
@@ -162,28 +155,26 @@ module.exports.create = function (state) {
   var serveSetup = require('serve-static')(__dirname + '/admin/setup', { redirect: true });
   var finalhandler = require('finalhandler');
   state.httpSetupServer = http.createServer(function (req, res) {
-    console.log('req.socket.encrypted', req.socket.encrypted);
     if (req.socket.encrypted) {
       serveSetup(req, res, finalhandler(req, res));
       return;
     }
-    console.log('try greenlock middleware');
     (state.greenlock && state.greenlock.middleware(redirectHttpsAndClose)
       || redirectHttpsAndClose)(req, res, function () {
-      console.log('fallthrough to setup ui');
+      console.log('[Setup] fallthrough to setup ui');
       serveSetup(req, res, finalhandler(req, res));
     });
   });
   state.tlsSetupServer = tls.createServer(setupTlsOpts, function (tlsSocket) {
-    console.log('tls connection');
+    console.log('[Setup] terminated tls connection');
     // things get a little messed up here
     state.httpSetupServer.emit('connection', tlsSocket);
   });
   state.tlsSetupServer.on('tlsClientError', function () {
-    console.error('tlsClientError SetupServer');
+    console.error('[Setup] tlsClientError SetupServer');
   });
   state.httpsSetupServer = function (servername, socket) {
-    console.log('httpsTunnel (Setup) servername', servername);
+    console.log('[Setup] raw tls connection for', servername);
     state._servernames = [servername];
     state.config.agreeTos = true; // TODO: BUG XXX BAD, make user accept
     setupSniCallback = state.greenlock.tlsOptions.SNICallback || noSniCallback('setup');
@@ -194,31 +185,30 @@ module.exports.create = function (state) {
   // vhost
   //
   state.httpVhost = http.createServer(function (req, res) {
-    console.log('httpVhost (local)');
-    console.log('req.socket.encrypted', req.socket.encrypted);
+    if (state.debug) { console.log('[vhost] encrypted?', req.socket.encrypted); }
 
     var finalhandler = require('finalhandler');
     // TODO compare SNI to hostname?
     var host = (req.headers.host||'').toLowerCase().trim();
-    var serveSetup = require('serve-static')(state.config.vhost.replace(/:hostname/g, host), { redirect: true });
+    var serveVhost = require('serve-static')(state.config.vhost.replace(/:hostname/g, host), { redirect: true });
 
-    if (req.socket.encrypted) { serveSetup(req, res, finalhandler(req, res)); return; }
+    if (req.socket.encrypted) { serveVhost(req, res, finalhandler(req, res)); return; }
 
-    console.log('try greenlock middleware for vhost');
-    (state.greenlock && state.greenlock.middleware(redirectHttpsAndClose)
-      || redirectHttpsAndClose)(req, res, function () {
-      console.log('fallthrough to vhost serving???');
-      serveSetup(req, res, finalhandler(req, res));
-    });
+    if (!state.greenlock) {
+      console.error("Cannot vhost without greenlock options");
+      res.end("Cannot vhost without greenlock options");
+    }
+
+    state.greenlock.middleware(redirectHttpsAndClose);
   });
   state.tlsVhost = tls.createServer(
     { SNICallback: function (servername, cb) {
-        console.log('tlsVhost debug SNICallback', servername);
+        if (state.debug) { console.log('[vhost] SNICallback for', servername); }
        tunnelAdminTlsOpts.SNICallback(servername, cb);
      }
    }
    , function (tlsSocket) {
-      console.log('tlsVhost (local)');
+      if (state.debug) { console.log('tlsVhost (local)'); }
      state.httpVhost.emit('connection', tlsSocket);
    }
  );
@@ -226,7 +216,7 @@ module.exports.create = function (state) {
     console.error('tlsClientError Vhost');
   });
   state.httpsVhost = function (servername, socket) {
-    console.log('httpsVhost (local)', servername);
+    if (state.debug) { console.log('[vhost] httpsVhost (local) for', servername); }
     state.tlsVhost.emit('connection', wrapSocket(socket));
   };
 };
54 lib/pipe-ws.js

@@ -0,0 +1,54 @@
+'use strict';
+
+var Packer = require('proxy-packer');
+
+module.exports = function pipeWs(servername, service, conn, remote, serviceport) {
+  var browserAddr = Packer.socketToAddr(conn);
+  var cid = Packer.addrToId(browserAddr);
+  browserAddr.service = service;
+  browserAddr.serviceport = serviceport;
+  browserAddr.name = servername;
+  conn.tunnelCid = cid;
+  var rid = Packer.socketToId(remote.upgradeReq.socket);
+
+  //if (state.debug) { console.log('[pipeWs] client', cid, '=> remote', rid, 'for', servername, 'via', service); }
+
+  function sendWs(data, serviceOverride) {
+    if (remote.ws && (!conn.tunnelClosing || serviceOverride)) {
+      try {
+        remote.ws.send(Packer.pack(browserAddr, data, serviceOverride), { binary: true });
+        // If we can't send data over the websocket as fast as this connection can send it to us
+        // (or there are a lot of connections trying to send over the same websocket) then we
+        // need to pause the connection for a little. We pause all connections if any are paused
+        // to make things more fair so a connection doesn't get stuck waiting for everyone else
+        // to finish because it got caught on the boundary. Also if serviceOverride is set it
+        // means the connection is over, so no need to pause it.
+        if (!serviceOverride && (remote.pausedConns.length || remote.ws.bufferedAmount > 1024*1024)) {
+          // console.log('pausing', cid, 'to allow web socket to catch up');
+          conn.pause();
+          remote.pausedConns.push(conn);
+        }
+      } catch (err) {
+        console.warn('[pipeWs] remote', rid, ' => client', cid, 'error sending websocket message', err);
+      }
+    }
+  }
+
+  remote.clients[cid] = conn;
+
+  conn.on('data', function (chunk) {
+    //if (state.debug) { console.log('[pipeWs] client', cid, ' => remote', rid, chunk.byteLength, 'bytes'); }
+    sendWs(chunk);
+  });
+
+  conn.on('error', function (err) {
+    console.warn('[pipeWs] client', cid, 'connection error:', err);
+  });
+
+  conn.on('close', function (hadErr) {
+    //if (state.debug) { console.log('[pipeWs] client', cid, 'closing'); }
+    sendWs(null, hadErr ? 'error': 'end');
+    delete remote.clients[cid];
+  });
+
+};
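pipeWs applies backpressure by pausing a client socket and parking it in remote.pausedConns whenever the websocket buffers more than 1 MiB. The other half of that handshake is the 'drain' listener on the websocket's underlying socket (visible in telebitd.js below). A minimal sketch of the resume side, assuming a remote shaped like the token objects created in this commit (a ws plus a pausedConns array):

    // resume every parked client once the websocket's send buffer empties;
    // all paused connections share one websocket, so they wake together
    function makeDrainHandler(remote) {
      return function onDrain() {
        var paused = remote.pausedConns;
        remote.pausedConns = [];
        paused.forEach(function (conn) { conn.resume(); });
      };
    }
    // usage sketch: ws._socket.on('drain', makeDrainHandler(token));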
lib/unwrap-tls.js

@@ -1,57 +1,10 @@
 'use strict';
 
 var Packer = require('proxy-packer');
 var sni = require('sni');
+var pipeWs = require('./pipe-ws.js');
 
-function pipeWs(servername, service, conn, remote, serviceport) {
-  console.log('[pipeWs] servername:', servername, 'service:', service);
-
-  var browserAddr = Packer.socketToAddr(conn);
-  browserAddr.service = service;
-  browserAddr.serviceport = serviceport;
-  browserAddr.name = servername;
-  var cid = Packer.addrToId(browserAddr);
-  conn.tunnelCid = cid;
-  console.log('[pipeWs] browser is', cid, 'home-cloud is', Packer.socketToId(remote.upgradeReq.socket));
-
-  function sendWs(data, serviceOverride) {
-    if (remote.ws && (!conn.tunnelClosing || serviceOverride)) {
-      try {
-        remote.ws.send(Packer.pack(browserAddr, data, serviceOverride), { binary: true });
-        // If we can't send data over the websocket as fast as this connection can send it to us
-        // (or there are a lot of connections trying to send over the same websocket) then we
-        // need to pause the connection for a little. We pause all connections if any are paused
-        // to make things more fair so a connection doesn't get stuck waiting for everyone else
-        // to finish because it got caught on the boundary. Also if serviceOverride is set it
-        // means the connection is over, so no need to pause it.
-        if (!serviceOverride && (remote.pausedConns.length || remote.ws.bufferedAmount > 1024*1024)) {
-          // console.log('pausing', cid, 'to allow web socket to catch up');
-          conn.pause();
-          remote.pausedConns.push(conn);
-        }
-      } catch (err) {
-        console.warn('[pipeWs] error sending websocket message', err);
-      }
-    }
-  }
-
-  remote.clients[cid] = conn;
-  conn.on('data', function (chunk) {
-    console.log('[pipeWs] data from browser to tunneler', chunk.byteLength);
-    sendWs(chunk);
-  });
-  conn.on('error', function (err) {
-    console.warn('[pipeWs] browser connection error', err);
-  });
-  conn.on('close', function (hadErr) {
-    console.log('[pipeWs] browser connection closing');
-    sendWs(null, hadErr ? 'error': 'end');
-    delete remote.clients[cid];
-  });
-}
-
-module.exports.createTcpConnectionHandler = function (copts) {
-  var Devices = copts.Devices;
+module.exports.createTcpConnectionHandler = function (state) {
+  var Devices = state.Devices;
 
   return function onTcpConnection(conn, serviceport) {
     serviceport = serviceport || conn.localPort;
@@ -78,7 +31,7 @@ module.exports.createTcpConnectionHandler = function (copts) {
     // defer after return (instead of being in many places)
     function deferData(fn) {
       if (fn) {
-        copts[fn](servername, conn)
+        state[fn](servername, conn)
       }
       process.nextTick(function () {
        conn.resume();
@@ -93,51 +46,50 @@ module.exports.createTcpConnectionHandler = function (copts) {
    function tryTls() {
      var vhost;
 
-     console.log("");
-
-     if (!copts.servernames.length) {
-       console.log("https => admin => setup => (needs bogus tls certs to start?)");
+     if (!state.servernames.length) {
+       console.info("[Setup] https => admin => setup => (needs bogus tls certs to start?)");
        deferData('httpsSetupServer');
        return;
      }
 
-     if (-1 !== copts.servernames.indexOf(servername)) {
-       console.log("Lock and load, admin interface time!");
+     if (-1 !== state.servernames.indexOf(servername)) {
+       if (state.debug) { console.log("[Admin]", servername); }
        deferData('httpsTunnel');
        return;
      }
 
-     if (copts.config.nowww && /^www\./i.test(servername)) {
+     if (state.config.nowww && /^www\./i.test(servername)) {
        console.log("TODO: use www bare redirect");
      }
 
      function run() {
        if (!servername) {
-         console.log("No SNI was given, so there's nothing we can do here");
+         if (state.debug) { console.log("No SNI was given, so there's nothing we can do here"); }
          deferData('httpsInvalid');
          return;
        }
 
-       var nextDevice = Devices.next(copts.deviceLists, servername);
+       var nextDevice = Devices.next(state.deviceLists, servername);
        if (!nextDevice) {
-         console.log("No devices match the given servername");
+         if (state.debug) { console.log("No devices match the given servername"); }
          deferData('httpsInvalid');
          return;
        }
 
-       console.log("pipeWs(servername, service, socket, deviceLists['" + servername + "'])");
+       if (state.debug) { console.log("pipeWs(servername, service, socket, deviceLists['" + servername + "'])"); }
        deferData();
        pipeWs(servername, service, conn, nextDevice, serviceport);
      }
 
-     if (copts.config.vhost) {
-       console.log("VHOST path", copts.config.vhost);
-       vhost = copts.config.vhost.replace(/:hostname/, (servername||''));
-       console.log("VHOST name", vhost);
-       //copts.httpsVhost(servername, conn);
+     // TODO don't run an fs check if we already know this is working elsewhere
+     //if (!state.validHosts) { state.validHosts = {}; }
+     if (state.config.vhost) {
+       vhost = state.config.vhost.replace(/:hostname/, (servername||''));
+       if (state.debug) { console.log("[tcp] [vhost]", state.config.vhost, "=>", vhost); }
+       //state.httpsVhost(servername, conn);
+       //return;
        require('fs').readdir(vhost, function (err, nodes) {
-         console.log("VHOST error?", err);
+         if (state.debug && err) { console.log("VHOST error", err); }
         if (err) { run(); return; }
         if (nodes) { deferData('httpsVhost'); }
       });
@@ -152,7 +104,7 @@ module.exports.createTcpConnectionHandler = function (copts) {
      // TLS
      service = 'https';
      servername = (sni(firstChunk)||'').toLowerCase();
-     console.log("tls hello servername:", servername);
+     if (state.debug) { console.log("[tcp] tls hello from '" + servername + "'"); }
      tryTls();
      return;
    }
@@ -161,13 +113,13 @@ module.exports.createTcpConnectionHandler = function (copts) {
    str = firstChunk.toString();
    m = str.match(/(?:^|[\r\n])Host: ([^\r\n]+)[\r\n]*/im);
    servername = (m && m[1].toLowerCase() || '').split(':')[0];
-   console.log('servername', servername);
+   if (state.debug) { console.log("[tcp] http hostname '" + servername + "'"); }
 
    if (/HTTP\//i.test(str)) {
-     if (!copts.servernames.length) {
-       console.log('copts.httpSetupServer', copts.httpSetupServer);
+     if (!state.servernames.length) {
+       console.info("[tcp] No admin servername. Entering setup mode.");
        deferData();
-       copts.httpSetupServer.emit('connection', conn);
+       state.httpSetupServer.emit('connection', conn);
        return;
      }
@@ -176,9 +128,9 @@ module.exports.createTcpConnectionHandler = function (copts) {
      // /^\/\.well-known\/acme-challenge\//.test(str)
      if (/well-known/.test(str)) {
        // HTTP
-       if (Devices.exist(copts.deviceLists, servername)) {
+       if (Devices.exist(state.deviceLists, servername)) {
          deferData();
-         pipeWs(servername, service, conn, Devices.next(copts.deviceLists, servername), serviceport);
+         pipeWs(servername, service, conn, Devices.next(state.deviceLists, servername), serviceport);
          return;
        }
        deferData('handleHttp');
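Both connection handlers classify traffic by peeking at the first byte: 22 (0x16) marks a TLS handshake record, where the servername can be parsed from the SNI extension, while a printable ASCII byte suggests plaintext HTTP, where the Host header carries the name. A standalone sketch of that classification step, assuming the same sni package required above:

    var sni = require('sni'); // extracts the servername from a TLS ClientHello

    function classifyFirstChunk(firstChunk) {
      if (22 === firstChunk[0]) {
        // TLS handshake record type 22 (0x16)
        return { service: 'https', servername: (sni(firstChunk) || '').toLowerCase() };
      }
      if (firstChunk[0] > 32 && firstChunk[0] < 127) {
        // printable ASCII: likely plaintext HTTP, so parse the Host header
        var m = firstChunk.toString().match(/(?:^|[\r\n])Host: ([^\r\n]+)[\r\n]*/im);
        return { service: 'http', servername: ((m && m[1].toLowerCase()) || '').split(':')[0] };
      }
      return { service: 'unknown', servername: '' };
    }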
108 telebitd.js
@@ -12,6 +12,7 @@ function timeoutPromise(duration) {
 }
 
 var Devices = require('./lib/device-tracker');
+var pipeWs = require('./lib/pipe-ws.js');
 
 module.exports.store = { Devices: Devices };
 module.exports.create = function (state) {
@@ -22,8 +23,70 @@ module.exports.create = function (state) {
   state.Devices = Devices;
   var onTcpConnection = require('./lib/unwrap-tls').createTcpConnectionHandler(state);
 
+  // TODO Use a Single TCP Handler
+  // Issues:
+  //   * dynamic ports are dedicated to a device or cluster
+  //   * servernames could come in on ports that belong to a different device
+  //   * servernames could come in that belong to no device
+  //   * this could lead to an attack / security vulnerability with ACME certificates
+  // Solutions:
+  //   * Restrict dynamic ports to a particular device
+  //   * Restrict the use of servernames
+  function onDynTcpConn(conn) {
+    var serviceport = this.address().port;
+    console.log('[DynTcpConn] new connection on', serviceport);
+    var remote = Devices.next(state.deviceLists, serviceport);
+
+    if (!remote) {
+      conn.write("[Sanity Error] I've got a blank space baby, but nowhere to write your name.");
+      conn.end();
+      try {
+        this.close();
+      } catch(e) {
+        console.error("[DynTcpConn] failed to close server:", e);
+      }
+      return;
+    }
+
+    conn.once('data', function (firstChunk) {
+      console.log("[DynTcp] examining firstChunk", serviceport);
+      conn.pause();
+      conn.unshift(firstChunk);
+
+      var servername;
+      var hostname;
+      var str;
+      var m;
+
+      if (22 === firstChunk[0]) {
+        servername = (sni(firstChunk)||'').toLowerCase();
+      } else if (firstChunk[0] > 32 && firstChunk[0] < 127) {
+        str = firstChunk.toString();
+        m = str.match(/(?:^|[\r\n])Host: ([^\r\n]+)[\r\n]*/im);
+        hostname = (m && m[1].toLowerCase() || '').split(':')[0];
+      }
+
+      if (servername || hostname) {
+        if (servername) {
+          conn.write("TLS with sni is allowed only on standard ports. If you've registered '" + servername + "' use port 443.");
+        } else {
+          conn.write("HTTP with Host headers is not allowed on dynamic ports. If you've registered '" + hostname + "' use port 80.");
+        }
+        conn.end();
+        return;
+      }
+
+      // pipeWs(servername, servicename, client, remote, serviceport)
+      // remote.clients is managed as part of the piping process
+      console.log("[DynTcp] piping to remote", serviceport);
+      pipeWs(null, 'tcp', conn, remote, serviceport);
+
+      process.nextTick(function () { conn.resume(); });
+    });
+  }
+
   function onWsConnection(ws, upgradeReq) {
-    console.log(ws);
+    if (state.debug) { console.log('[ws] connection'); }
     var socketId = Packer.socketToId(upgradeReq.socket);
     var remotes = {};
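The dynamic ports themselves come from listen(0): the OS picks any free ephemeral port, and the server only learns which one in the listening callback via this.address().port, which is why both onDynTcpConn and handleTcpServer read it there. A self-contained sketch of that pattern:

    var net = require('net');

    var server = net.createServer(function (conn) {
      conn.end('routed via a dynamically assigned port\n');
    });
    // port 0 tells the OS to assign any free ephemeral port
    server.listen(0, function () {
      // the chosen port is only known once the server is listening;
      // this is the moment to register it for routing (Devices.add below)
      console.log('assigned port:', server.address().port);
    });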
@@ -131,6 +194,7 @@ module.exports.create = function (state) {
     token.ws = ws;
     token.upgradeReq = upgradeReq;
     token.clients = {};
+    token.dynamicPorts = [];
 
     token.pausedConns = [];
     ws._socket.on('drain', function () {
@@ -153,11 +217,26 @@ module.exports.create = function (state) {
     });
 
     token.domains.forEach(function (domainname) {
-      console.log('domainname', domainname);
       Devices.add(state.deviceLists, domainname, token);
     });
+
+    function handleTcpServer() {
+      var serviceport = this.address().port;
+      console.info('[DynTcpConn] Port', serviceport, 'now open for', token.deviceId);
+      token.dynamicPorts.push(serviceport);
+      Devices.add(state.deviceLists, serviceport, token);
+    }
+
+    try {
+      token.server = require('net').createServer(onDynTcpConn).listen(0, handleTcpServer);
+    } catch(e) {
+      // what a wonderful problem it will be the day that this bug needs to be fixed
+      // (i.e. there are enough users to run out of ports)
+      console.error("Error assigning a dynamic port to a new connection:", e);
+    }
+
     remotes[jwtoken] = token;
-    console.log("added token '" + token.deviceId + "' to websocket", socketId);
+    console.info("[ws] authorized", socketId, "for", token.deviceId);
     return null;
   }
|
@ -172,15 +251,22 @@ module.exports.create = function (state) {
|
|||
remote.domains.forEach(function (domainname) {
|
||||
Devices.remove(state.deviceLists, domainname, remote);
|
||||
});
|
||||
remote.dynamicPorts.forEach(function (portnumber) {
|
||||
Devices.remove(state.deviceLists, portnumber, remote);
|
||||
});
|
||||
remote.ws = null;
|
||||
remote.upgradeReq = null;
|
||||
remote.server.close(function () {
|
||||
console.log("[DynTcpConn] closing server for ", remote.server.address().port);
|
||||
});
|
||||
remote.server = null;
|
||||
|
||||
// Close all of the existing browser connections associated with this websocket connection.
|
||||
Object.keys(remote.clients).forEach(function (cid) {
|
||||
closeBrowserConn(cid);
|
||||
});
|
||||
delete remotes[jwtoken];
|
||||
console.log("removed token '" + remote.deviceId + "' from websocket", socketId);
|
||||
console.log("[ws] removed token '" + remote.deviceId + "' from", socketId);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@@ -236,7 +322,7 @@ module.exports.create = function (state) {
     // We only ever send one command and we send it once, so we just hard coded the ID as 1.
     if (cmd[0] === -1) {
       if (cmd[1]) {
-        console.log('received error response to hello from', socketId, cmd[1]);
+        console.warn('received error response to hello from', socketId, cmd[1]);
       }
     }
     else {
@@ -262,7 +348,7 @@ module.exports.create = function (state) {
 
   , onmessage: function (tun) {
       var cid = Packer.addrToId(tun);
-      console.log("remote '" + logName() + "' has data for '" + cid + "'", tun.data.byteLength);
+      if (state.debug) { console.log("remote '" + logName() + "' has data for '" + cid + "'", tun.data.byteLength); }
 
      var browserConn = getBrowserConn(cid);
      if (!browserConn) {
@@ -319,7 +405,7 @@ module.exports.create = function (state) {
     }
   , onerror: function (tun) {
       var cid = Packer.addrToId(tun);
-      console.log('[TunnelError]', cid, tun.message);
+      console.warn('[TunnelError]', cid, tun.message);
       closeBrowserConn(cid);
     }
   };
@@ -343,7 +429,7 @@ module.exports.create = function (state) {
     // Otherwise we check to see if the pong has also timed out, and if not we send a ping
     // and call this function again when the pong will have timed out.
     else if (silent < activityTimeout + pongTimeout) {
-      console.log('pinging', logName());
+      if (state.debug) { console.log('pinging', logName()); }
       try {
         ws.ping();
       } catch (err) {
@@ -355,7 +441,7 @@ module.exports.create = function (state) {
     // Last case means the ping we sent before didn't get a response soon enough, so we
     // need to close the websocket connection.
     else {
-      console.log('home cloud', logName(), 'connection timed out');
+      console.warn('home cloud', logName(), 'connection timed out');
       ws.close(1013, 'connection timeout');
     }
   }
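The two branches above complete a three-state keepalive: recent traffic just reschedules the check, a quiet period up to activityTimeout + pongTimeout earns a ping, and anything longer closes the socket with code 1013. A compact sketch of the same decision, with hypothetical timeout values (the real ones come from state/config):

    var activityTimeout = 2 * 60 * 1000; // hypothetical
    var pongTimeout = 10 * 1000;         // hypothetical

    function watchKeepAlive(ws, getLastActivity) {
      var silent = Date.now() - getLastActivity();
      if (silent < activityTimeout) {
        // traffic seen recently; check again when it could next time out
        setTimeout(function () { watchKeepAlive(ws, getLastActivity); }, activityTimeout - silent);
      } else if (silent < activityTimeout + pongTimeout) {
        // quiet, but the pong window is still open: ping and wait it out
        try { ws.ping(); } catch (err) { return ws.terminate(); }
        setTimeout(function () { watchKeepAlive(ws, getLastActivity); }, activityTimeout + pongTimeout - silent);
      } else {
        // the ping went unanswered
        ws.close(1013, 'connection timeout');
      }
    }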
@@ -367,14 +453,14 @@ module.exports.create = function (state) {
   ws.on('pong', refreshTimeout);
   ws.on('message', function forwardMessage(chunk) {
     refreshTimeout();
-    console.log('message from home cloud to tunneler to browser', chunk.byteLength);
+    if (state.debug) { console.log('[ws] device => client : demultiplexing message ', chunk.byteLength, 'bytes'); }
     //console.log(chunk.toString());
     unpacker.fns.addChunk(chunk);
   });
 
   function hangup() {
     clearTimeout(timeoutId);
-    console.log('home cloud', logName(), 'connection closing');
+    console.log('[ws] device hangup', logName(), 'connection closing');
     Object.keys(remotes).forEach(function (jwtoken) {
       removeToken(jwtoken);
     });