diff --git a/.awsbox.json b/.awsbox.json new file mode 100644 index 0000000000000000000000000000000000000000..3f0ea469b9b1502e682f2f78eed9b5a0d654879f --- /dev/null +++ b/.awsbox.json @@ -0,0 +1,21 @@ +{ + "processes": [ + "bin/router", + "bin/proxy", + "bin/dbwriter", + "bin/keysigner", + "bin/verifier", + "bin/browserid" + ], + "env": { + "CONFIG_FILES": "$HOME/code/config/production.json,$HOME/code/config/aws.json,$HOME/config.json" + }, + "hooks": { + "postdeploy": "scripts/awsbox/post_deploy.js", + "poststart": "scripts/show_config.js", + "postcreate": "scripts/awsbox/post_create.sh" + }, + "packages": [ + "mysql-server" + ] +} diff --git a/docs/AWS_DEPLOYMENT.md b/docs/AWS_DEPLOYMENT.md index f91c5d71e5eb4a2025ff7d9e7226f924fad05944..5d31edb280e8b3562dd4626670a970b7846be81e 100644 --- a/docs/AWS_DEPLOYMENT.md +++ b/docs/AWS_DEPLOYMENT.md @@ -17,7 +17,16 @@ In order to use these deploy scripts, you need the following: 1. have built and locally run browserid 2. an ssh key in `~/.ssh/id_rsa.pub` 3. an AWS account that is "signed up" for EC2 - 4. the "DNS secret" that you get from lloyd + 4. (optionally) a secrets bundle that you get from lloyd (for DNS, SSL, and mail setup) + +For the secrets bundle, you'll need gpg to unpack it, and will do +the following: + + $ cd + $ curl -s http://people.mozilla.org/~lhilaiel/persona_goodies.tgz.gpg | gpg -d | tar xvzf - + +You'll be asked for the decryption password from GPG. Get that from +lloyd. Once you have these things, you'll need to relay them to deployment scripts via your environment. you might put something like this @@ -27,8 +36,8 @@ in your `.bashrc`: export AWS_ID=<your id> # This is your Secret Access Key from your AWS Security Credentials export AWS_SECRET=<your secret> - # This is a magic credential you get from lloyd - export BROWSERID_DEPLOY_DNS_KEY=98...33 + # install super magic secrets into your environment + . $HOME/.persona_secrets/env.sh ## Verify the credentials @@ -46,38 +55,42 @@ you can use a different name that is short but meaningful to what you're going to deploy. Once chosen, invoke `deploy.js` like this: $ scripts/deploy.js deploy some_name_i_chose - attempting to set up some_name_i_chose.hacksign.in + awsbox cmd: node_modules/.bin/awsbox create -n some_name_i_chose -p /Users/lth/.persona_secrets/cert.pem -s /Users/lth/.persona_secrets/key.pem -d -u https://some_name_i_chose.personatest.org -x /Users/lth/.persona_secrets/smtp.json + reading .awsbox.json + attempting to set up VM "some_name_i_chose" + ... Checking for DNS availability of some_name_i_chose.personatest.org ... VM launched, waiting for startup (should take about 20s) - ... Instance ready, setting up DNS - ... DNS set up, setting human readable name in aws + ... Adding DNS Record for some_name_i_chose.personatest.org + ... Instance ready, setting human readable name in aws ... name set, waiting for ssh access and configuring - ... nope. not yet. retrying. - ... nope. not yet. retrying. - ... nope. not yet. retrying. + ... adding additional configuration values + ... public url will be: https://some_name_i_chose.personatest.org ... nope. not yet. retrying. ... nope. not yet. retrying. ... victory! server is accessible and configured ... and your git remote is all set up + ... finally, installing custom packages: mysql-server + ... copying up SSL cert Yay! You have your very own deployment. Here's the basics: - 1. deploy your code: git push some_name_i_chose <mybranch>:master - 2. visit your server on the web: https://some_name_i_chose.hacksign.in - 3. 
test via a website: http://some_name_i_chose.myfavoritebeer.org - 4. ssh in with sudo: ssh ec2-user@some_name_i_chose.hacksign.in - 5. ssh as the deployment user: ssh app@some_name_i_chose.hacksign.in + 1. deploy your code: git push some_name_i_chose HEAD:master + 2. visit your server on the web: https://some_name_i_chose.personatest.org + 3. ssh in with sudo: ssh ec2-user@some_name_i_chose.personatest.org + 4. ssh as the deployment user: ssh app@some_name_i_chose.personatest.org - enjoy! Here's your server details { - "instanceId": "i-8f4beeea", - "imageId": "ami-6900d100", + Here are your server's details: { + "instanceId": "i-f0b35e89", + "imageId": "ami-ac8524c5", "instanceState": { "code": "16", "name": "running" }, - "dnsName": "ec2-184-73-84-132.compute-1.amazonaws.com", - "keyName": "browserid deploy key (4736caec113ccb53aa62bb165c58c17d)", + "dnsName": "ec2-23-21-24-182.compute-1.amazonaws.com", + "keyName": "awsbox deploy key (4736caec113ccb53aa62bb165c58c17d)", "instanceType": "t1.micro", - "ipAddress": "184.73.84.132" + "ipAddress": "23.21.24.182", + "name": "i-f0b35e89" } The output contains instructions for use. Note that every occurance of @@ -125,8 +138,11 @@ These things cost money by the hour, not a lot, but money. So when you want to decommission a VM and release your hold on the DNS name, simply: $ scripts/deploy.js destroy some_name_i_chose - trying to destroy VM for some_name_i_chose.hacksign.in: done - trying to remove DNS for some_name_i_chose.hacksign.in: done + awsbox cmd: node_modules/.bin/awsbox destroy some_name_i_chose + trying to destroy VM for some_name_i_chose: done + trying to remove git remote: done + trying to remove DNS: some_name_i_chose.personatest.org + deleting some_name_i_chose.personatest.org: done ## Overview of what's deployed to VMs @@ -139,18 +155,20 @@ There are several things that are pre-configured for your pleasure: on the server, that you can push to. 3. `post-update` hook: when you push to the `master` branch of the server's git repository, this code restarts your services to pick up the changes. - 4. nginx with SSL and 503 support - you'll get SSL for free and will see + 4. SSL and 503 support - you'll get SSL for free and will see a reasonable error message when your servers aren't running. 5. a mysql database with a browserid user without any password. ### User accounts -VMs have two pre-configured users, both which you have passphraseless SSH +VMs have three pre-configured users, all of which you have passphraseless SSH access to: * `ec2-user` is an account with full sudo access. * `app` is an account that has no sudo, receives and builds code via git pushes, and runs the application servers. + * `proxy` is the account that the HTTP reverse proxy that front-ends your server + runs as. Feel free to start a new server, and ssh in as `app` to explore all of the configuration. An attempt has been made to isolate as much configuration diff --git a/lib/configuration.js b/lib/configuration.js index abe731e59d8dd5d52b97a22ba29a69fbf3f5e205..9bfccbffa5e15e13569d1dc897d51e498d915e74 100644 --- a/lib/configuration.js +++ b/lib/configuration.js @@ -114,7 +114,8 @@ var conf = module.exports = convict({ smtp: { host: 'string?', user: 'string?', - pass: 'string?'
+ pass: 'string?', + port: 'integer = 25' }, statsd: { enabled: { diff --git a/lib/email.js b/lib/email.js index 4bb0c50d650bb74a5a8b4d47ac047c14ec71d52c..ee444477bafefe220a3b6bcf432a8766d0e6a209 100644 --- a/lib/email.js +++ b/lib/email.js @@ -13,7 +13,10 @@ logger = require('./logging.js').logger; /* if smtp parameters are configured, use them */ try { var smtp_params = config.get('smtp'); } catch(e) {}; if (smtp_params && smtp_params.host) { - emailer.SMTP = { host: smtp_params.host }; + emailer.SMTP = { + host: smtp_params.host, + port: smtp_params.port + }; logger.info("delivering email via SMTP host: " + emailer.SMTP.host); if (smtp_params.user) { emailer.SMTP.use_authentication = true; diff --git a/package.json b/package.json index c9412e26b9793e18d97d4b6db58b3eaf646723e5..2393fefd1b25665cbb03bcfb8a587a97f7c19aa3 100644 --- a/package.json +++ b/package.json @@ -35,9 +35,8 @@ "winston": "0.5.6" }, "devDependencies": { - "xml2js": "0.1.13", "vows": "0.5.13", - "aws-lib": "0.0.5", + "awsbox": "0.2.7", "irc": "0.3.3" }, "scripts": { diff --git a/scripts/awsbox/post_create.sh b/scripts/awsbox/post_create.sh new file mode 100755 index 0000000000000000000000000000000000000000..33e1de7b316680ddc4761668af17112d9b75985f --- /dev/null +++ b/scripts/awsbox/post_create.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +sudo /sbin/chkconfig mysqld on +sudo /sbin/service mysqld start +sudo mysql -u root < $(dirname "${BASH_SOURCE[0]}")/create_browserid_user.sql +echo "CREATE USER 'browserid'@'localhost';" | mysql -u root +echo "CREATE DATABASE browserid;" | mysql -u root +echo "GRANT ALL ON browserid.* TO 'browserid'@'localhost';" | mysql -u root + diff --git a/scripts/awsbox/post_deploy.js b/scripts/awsbox/post_deploy.js new file mode 100755 index 0000000000000000000000000000000000000000..af133aa60c7652b91cdc1bf66b2a616af507a92c --- /dev/null +++ b/scripts/awsbox/post_deploy.js @@ -0,0 +1,16 @@ +#!/bin/bash + +if [ ! -f $HOME/var/root.cert ] ; then + echo ">> generating keypair" + scripts/generate_ephemeral_keys.sh + mv var/root.{cert,secretkey} $HOME/var +else + echo ">> no keypair needed. you gots one" +fi + +echo ">> updating strings" +svn co -q http://svn.mozilla.org/projects/l10n-misc/trunk/browserid/locale +./locale/compile-mo.sh locale/ + +echo ">> generating production resources" +scripts/compress diff --git a/scripts/deploy.js b/scripts/deploy.js index aebb6c72ff31fdd28b0bc0a31e197f44d53f68b2..fdc90bfa961c042967c8dc469415e6cc09a495f0 100755 --- a/scripts/deploy.js +++ b/scripts/deploy.js @@ -1,155 +1,82 @@ #!/usr/bin/env node -const -aws = require('./deploy/aws.js'); -path = require('path'); -vm = require('./deploy/vm.js'), -key = require('./deploy/key.js'), -ssh = require('./deploy/ssh.js'), -git = require('./deploy/git.js'), -dns = require('./deploy/dns.js'); - -var verbs = {}; - -function checkErr(err) { - if (err) { - process.stderr.write('fatal error: ' + err + "\n"); - process.exit(1); - } -} +var path = require('path'), +child_process = require('child_process'); + +/* + * A thin wrapper around awsbox that expects certain env + * vars and invokes awsbox for ya to deploy a VM. + */ -function printInstructions(name, deets) { - console.log("Yay! You have your very own deployment. Here's the basics:\n"); - console.log(" 1. deploy your code: git push " + name + " <mybranch>:master"); - console.log(" 2. visit your server on the web: https://" + name + ".hacksign.in"); - console.log(" 3. test via a website: http://" + name + ".myfavoritebeer.org"); - console.log(" 4. 
ssh in with sudo: ssh ec2-user@" + name + ".hacksign.in"); - console.log(" 5. ssh as the deployment user: ssh app@" + name + ".hacksign.in\n"); - console.log("enjoy! Here's your server details", JSON.stringify(deets, null, 4)); +if (!process.env['AWS_ID'] || ! process.env['AWS_SECRET']) { + console.log("You haven't defined AWS_ID and AWS_SECRET in the environment"); + console.log("Get these values from the amazon web console and try again."); + process.exit(1); } -function validateName(name) { - if (!/^[a-z][0-9a-z_\-]*$/.test(name)) { - throw "invalid name! must be a valid dns fragment ([z-a0-9\-_])"; - } +if (!process.env['ZERIGO_DNS_KEY'] && process.env['PERSONA_DEPLOY_DNS_KEY']) { + process.env['ZERIGO_DNS_KEY'] = process.env['PERSONA_DEPLOY_DNS_KEY']; } -verbs['destroy'] = function(args) { - if (!args || args.length != 1) { - throw 'missing required argument: name of instance'; +var cmd = path.join(__dirname, '..', 'node_modules', '.bin', 'awsbox'); +cmd = path.relative(process.env['PWD'], cmd); + +if (process.argv.length > 1 && + process.argv[2] === 'create' || + process.argv[2] === 'deploy') +{ + var options = {}; + + if (process.argv.length > 3) options.n = process.argv[3]; + + if (process.env['PERSONA_SSL_PRIV'] || process.env['PERSONA_SSL_PUB']) { + options.p = process.env['PERSONA_SSL_PUB']; + options.s = process.env['PERSONA_SSL_PRIV']; } - var name = args[0]; - validateName(name); - var hostname = name + ".hacksign.in"; - - process.stdout.write("trying to destroy VM for " + hostname + ": "); - vm.destroy(name, function(err, deets) { - console.log(err ? ("failed: " + err) : "done"); - process.stdout.write("trying to remove DNS for " + hostname + ": "); - dns.deleteRecord(hostname, function(err) { - console.log(err ? "failed: " + err : "done"); - if (deets && deets.ipAddress) { - process.stdout.write("trying to remove git remote: "); - git.removeRemote(name, deets.ipAddress, function(err) { - console.log(err ? "failed: " + err : "done"); - }); - } - }); - }); -} -verbs['test'] = function() { - // let's see if we can contact aws and zerigo - process.stdout.write("Checking DNS management access: "); - dns.inUse("somerandomname", function(err) { - console.log(err ? "NOT ok: " + err : "good"); - process.stdout.write("Checking AWS access: "); - vm.list(function(err) { - console.log(err ? "NOT ok: " + err : "good"); - }); - }); -} + if (process.env['ZERIGO_DNS_KEY']) { + options.d = true; -verbs['deploy'] = function(args) { - if (!args || args.length != 1) { - throw 'missing required argument: name of instance'; + // when we have a DNS key, we can set a hostname! + var scheme = (options.p ? 'https' : 'http') + '://'; + + if (process.env['PERSONA_DEPLOYMENT_HOSTNAME']) { + options.u = scheme + process.env['PERSONA_DEPLOYMENT_HOSTNAME']; + } else if (options.n) { + options.u = scheme + options.n + ".personatest.org"; + } + + } else { + console.log('WARNING: No DNS key defined in the environment! ' + + 'I cannot set up DNS for you. We\'ll do this by IP.'); } - var name = args[0]; - validateName(name); - var hostname = name + ".hacksign.in"; - var longName = 'browserid deployment (' + name + ')'; - - console.log("attempting to set up " + name + ".hacksign.in"); - - dns.inUse(hostname, function(err, r) { - checkErr(err); - if (r) checkErr("sorry! that name '" + name + "' is already being used. so sad"); - - vm.startImage(function(err, r) { - checkErr(err); - console.log(" ... 
VM launched, waiting for startup (should take about 20s)"); - - vm.waitForInstance(r.instanceId, function(err, deets) { - checkErr(err); - console.log(" ... Instance ready, setting up DNS"); - dns.updateRecord(name, "hacksign.in", deets.ipAddress, function(err) { - checkErr(err); - console.log(" ... DNS set up, setting human readable name in aws"); - - vm.setName(r.instanceId, longName, function(err) { - checkErr(err); - console.log(" ... name set, waiting for ssh access and configuring"); - var config = { public_url: "https://" + name + ".hacksign.in"}; - - ssh.copyUpConfig(deets.ipAddress, config, function(err, r) { - checkErr(err); - console.log(" ... victory! server is accessible and configured"); - git.addRemote(name, deets.ipAddress, function(err, r) { - if (err && /already exists/.test(err)) { - console.log("OOPS! you already have a git remote named 'test'!"); - console.log("to create a new one: git remote add <name> " + - "app@" + deets.ipAddress + ":git"); - } else { - checkErr(err); - } - console.log(" ... and your git remote is all set up"); - console.log(""); - printInstructions(name, deets); - }); - }); - }); - }); - }); - }); - }); -}; -verbs['list'] = function(args) { - vm.list(function(err, r) { - checkErr(err); - console.log(JSON.stringify(r, null, 2)); - }); -}; - -var error = (process.argv.length <= 2); - -if (!error) { - var verb = process.argv[2]; - if (!verbs[verb]) error = "no such command: " + verb; - else { - try { - verbs[verb](process.argv.slice(3)); - } catch(e) { - error = "error running '" + verb + "' command: " + e; + // pass through/override with user provided vars + for (var i = 3; i < process.argv.length; i++) { + var k = process.argv[i]; + if (i + 1 < process.argv.length && k.length === 2 && k[0] === '-') { + options[k[1]] = process.argv[++i]; } } -} -if (error) { - if (typeof error === 'string') process.stderr.write('fatal error: ' + error + "\n\n"); + if (process.env['PERSONA_EPHEMERAL_CONFIG']) { + options.x = process.env['PERSONA_EPHEMERAL_CONFIG']; + } - process.stderr.write('A command line tool to deploy BrowserID onto Amazon\'s EC2\n'); - process.stderr.write('Usage: ' + path.basename(__filename) + - ' <' + Object.keys(verbs).join('|') + "> [args]\n"); - process.exit(1); + cmd += " create --ssl=force"; + + Object.keys(options).forEach(function(opt) { + cmd += " -" + opt; + cmd += typeof options[opt] === 'string' ? " " + options[opt] : ""; + }); +} else { + cmd += " " + process.argv.slice(2).join(' '); } + +console.log("awsbox cmd: " + cmd); +var cp = child_process.exec(cmd, function(err) { + if (err) process.exit(err.code); + else process.exit(0); +}); +cp.stdout.pipe(process.stdout); +cp.stderr.pipe(process.stderr); diff --git a/scripts/deploy/aws.js b/scripts/deploy/aws.js deleted file mode 100644 index 6641989e9a8dc348d0b4d843d4c594386ec64a91..0000000000000000000000000000000000000000 --- a/scripts/deploy/aws.js +++ /dev/null @@ -1,7 +0,0 @@ -const -awslib = require('aws-lib'); - -module.exports = awslib.createEC2Client(process.env['AWS_ID'], process.env['AWS_SECRET'], { - version: '2011-12-15' -}); - diff --git a/scripts/deploy/dns.js b/scripts/deploy/dns.js deleted file mode 100644 index 99a2f588788e117ce87958508177ec52155e4ce0..0000000000000000000000000000000000000000 --- a/scripts/deploy/dns.js +++ /dev/null @@ -1,83 +0,0 @@ -const -http = require('http'), -xml2js = new (require('xml2js')).Parser(), -jsel = require('JSONSelect'); - -const envVar = 'BROWSERID_DEPLOY_DNS_KEY'; -if (!process.env[envVar]) { - throw "Missing api key! 
contact lloyd and set the key in your env: " - + envVar; -} - -const api_key = process.env[envVar]; - -function doRequest(method, path, body, cb) { - var req = http.request({ - auth: 'lloyd@hilaiel.com:' + api_key, - host: 'ns.zerigo.com', - port: 80, - path: path, - method: method, - headers: { - 'Content-Type': 'application/xml', - 'Content-Length': body ? body.length : 0 - } - }, function(r) { - if ((r.statusCode / 100).toFixed(0) != 2 && - r.statusCode != 404) { - return cb("non 200 status: " + r.statusCode); - } - buf = ""; - r.on('data', function(chunk) { - buf += chunk; - }); - r.on('end', function() { - xml2js.parseString(buf, cb); - }); - }); - if (body) req.write(body); - req.end(); -}; - -exports.updateRecord = function (hostname, zone, ip, cb) { - doRequest('GET', '/api/1.1/zones.xml', null, function(err, r) { - if (err) return cb(err); - var m = jsel.match('object:has(:root > .domain:val(?)) > .id .#', - [ zone ], r); - if (m.length != 1) return cb("couldn't extract domain id from zerigo"); - var path = '/api/1.1/hosts.xml?zone_id=' + m[0]; - var body = '<host><data>' + ip + '</data><host-type>A</host-type>'; - body += '<hostname>' + hostname + '</hostname>' - body += '</host>'; - doRequest('POST', path, body, function(err, r) { - cb(err); - }); - }); -}; - -exports.deleteRecord = function (hostname, cb) { - doRequest('GET', '/api/1.1/hosts.xml?fqdn=' + hostname, null, function(err, r) { - if (err) return cb(err); - var m = jsel.match('.host .id > .#', r); - if (!m.length) return cb("no such DNS record"); - function deleteOne() { - if (!m.length) return cb(null); - var one = m.shift(); - doRequest('DELETE', '/api/1.1/hosts/' + one + '.xml', null, function(err) { - if (err) return cb(err); - deleteOne(); - }); - } - deleteOne(); - }); -}; - -exports.inUse = function (hostname, cb) { - doRequest('GET', '/api/1.1/hosts.xml?fqdn=' + hostname, null, function(err, r) { - if (err) return cb(err); - var m = jsel.match('.host', r); - // we shouldn't have multiple! oops! let's return the first one - if (m.length) return cb(null, m[0]); - cb(null, null); - }); -} diff --git a/scripts/deploy/git.js b/scripts/deploy/git.js deleted file mode 100644 index 4fc20d10a31307ca53abf50ac0b963c6afb15e74..0000000000000000000000000000000000000000 --- a/scripts/deploy/git.js +++ /dev/null @@ -1,120 +0,0 @@ -const -child_process = require('child_process'); -spawn = child_process.spawn, -path = require('path'); - -exports.addRemote = function(name, host, cb) { - var cmd = 'git remote add ' + name + ' app@'+ host + ':git'; - child_process.exec(cmd, cb); -}; - -// remove a remote, but only if it is pointed to a specific -// host. 
This will keep deploy from killing manuall remotes -// that you've set up -exports.removeRemote = function(name, host, cb) { - var desired = 'app@'+ host + ':git'; - var cmd = 'git remote -v show | grep push'; - child_process.exec(cmd, function(err, r) { - try { - var remotes = {}; - r.split('\n').forEach(function(line) { - if (!line.length) return; - var line = line.split('\t'); - if (!line.length == 2) return; - remotes[line[0]] = line[1].split(" ")[0]; - }); - if (remotes[name] && remotes[name] === desired) { - child_process.exec('git remote rm ' + name, cb); - } else { - throw "no such remote"; - } - } catch(e) { - cb(e); - } - }); -}; - -exports.currentSHA = function(dir, cb) { - if (typeof dir === 'function' && cb === undefined) { - cb = dir; - dir = path.join(__dirname, '..', '..'); - } - console.log(dir); - - var p = spawn('git', [ 'log', '--pretty=%h', '-1' ], { - env: { GIT_DIR: path.join(dir, ".git") } - }); - var buf = ""; - p.stdout.on('data', function(d) { - buf += d; - }); - p.on('exit', function(code, signal) { - console.log(buf); - var gitsha = buf.toString().trim(); - if (gitsha && gitsha.length === 7) { - return cb(null, gitsha); - } - cb("can't extract git sha from " + dir); - }); -}; - -function splitAndEmit(chunk, cb) { - if (chunk) chunk = chunk.toString(); - if (typeof chunk === 'string') { - chunk.split('\n').forEach(function (line) { - line = line.trim(); - if (line.length) cb(line); - }); - } -} - -exports.push = function(dir, host, pr, cb) { - if (typeof host === 'function' && cb === undefined) { - cb = pr; - pr = host; - host = dir; - dir = path.join(__dirname, '..', '..'); - } - - var p = spawn('git', [ 'push', 'app@' + host + ":git", 'dev:master' ], { - env: { - GIT_DIR: path.join(dir, ".git"), - GIT_WORK_TREE: dir - } - }); - p.stdout.on('data', function(c) { splitAndEmit(c, pr); }); - p.stderr.on('data', function(c) { splitAndEmit(c, pr); }); - p.on('exit', function(code, signal) { - return cb(code = 0); - }); -}; - -exports.pull = function(dir, remote, branch, pr, cb) { - var p = spawn('git', [ 'pull', "-f", remote, branch + ":" + branch ], { - env: { - GIT_DIR: path.join(dir, ".git"), - GIT_WORK_TREE: dir, - PWD: dir - }, - cwd: dir - }); - - p.stdout.on('data', function(c) { splitAndEmit(c, pr); }); - p.stderr.on('data', function(c) { splitAndEmit(c, pr); }); - - p.on('exit', function(code, signal) { - return cb(code = 0); - }); -} - -exports.init = function(dir, cb) { - var p = spawn('git', [ 'init' ], { - env: { - GIT_DIR: path.join(dir, ".git"), - GIT_WORK_TREE: dir - } - }); - p.on('exit', function(code, signal) { - return cb(code = 0); - }); -}; diff --git a/scripts/deploy/key.js b/scripts/deploy/key.js deleted file mode 100644 index d93da0158159b5d43f7361189c7fda12381e97ae..0000000000000000000000000000000000000000 --- a/scripts/deploy/key.js +++ /dev/null @@ -1,57 +0,0 @@ -const -aws = require('./aws.js'), -path = require('path'), -fs = require('fs'), -child_process = require('child_process'), -jsel = require('JSONSelect'), -crypto = require('crypto'); - -const keyPath = process.env['PUBKEY'] || path.join(process.env['HOME'], ".ssh", "id_rsa.pub"); - -exports.read = function(cb) { - fs.readFile(keyPath, cb); -}; - -exports.fingerprint = function(cb) { - exports.read(function(err, buf) { - if (err) return cb(err); - var b = new Buffer(buf.toString().split(' ')[1], 'base64'); - var md5sum = crypto.createHash('md5'); - md5sum.update(b); - cb(null, md5sum.digest('hex')); - }); -/* - child_process.exec( - "ssh-keygen -lf " + keyPath, - function(err, r) 
{ - if (!err) r = r.split(' ')[1]; - cb(err, r); - }); -*/ -}; - -exports.getName = function(cb) { - exports.fingerprint(function(err, fingerprint) { - if (err) return cb(err); - - var keyName = "browserid deploy key (" + fingerprint + ")"; - - // is this fingerprint known? - aws.call('DescribeKeyPairs', {}, function(result) { - var found = jsel.match(":has(.keyName:val(?)) > .keyName", [ keyName ], result); - if (found.length) return cb(null, keyName); - - // key isn't yet installed! - exports.read(function(err, key) { - aws.call('ImportKeyPair', { - KeyName: keyName, - PublicKeyMaterial: new Buffer(key).toString('base64') - }, function(result) { - if (!result) return cb('null result from ec2 on key addition'); - if (result.Errors) return cb(result.Errors.Error.Message); - cb(null, keyName); - }); - }); - }); - }); -}; diff --git a/scripts/deploy/sec.js b/scripts/deploy/sec.js deleted file mode 100644 index d8211692677c1e0d3775f7abef89beffd3e6c737..0000000000000000000000000000000000000000 --- a/scripts/deploy/sec.js +++ /dev/null @@ -1,59 +0,0 @@ -const -aws = require('./aws.js'); -jsel = require('JSONSelect'), -key = require('./key.js'); - -// every time you change the security group, change this version number -// so new deployments will create a new group with the changes -const SECURITY_GROUP_VERSION = 1; - -function createError(msg, r) { - var m = jsel.match('.Message', r); - if (m.length) msg += ": " + m[0]; - return msg; -} - -exports.getName = function(cb) { - var groupName = "browserid group v" + SECURITY_GROUP_VERSION; - - // is this fingerprint known? - aws.call('DescribeSecurityGroups', { - GroupName: groupName - }, function(r) { - if (jsel.match('.Code:val("InvalidGroup.NotFound")', r).length) { - aws.call('CreateSecurityGroup', { - GroupName: groupName, - GroupDescription: 'A security group for browserid deployments' - }, function(r) { - if (!r || !r.return === 'true') { - return cb(createError('failed to create security group', r)); - } - aws.call('AuthorizeSecurityGroupIngress', { - GroupName: groupName, - "IpPermissions.1.IpProtocol": 'tcp', - "IpPermissions.1.FromPort": 80, - "IpPermissions.1.ToPort": 80, - "IpPermissions.1.IpRanges.1.CidrIp": "0.0.0.0/0", - "IpPermissions.2.IpProtocol": 'tcp', - "IpPermissions.2.FromPort": 22, - "IpPermissions.2.ToPort": 22, - "IpPermissions.2.IpRanges.1.CidrIp": "0.0.0.0/0", - "IpPermissions.3.IpProtocol": 'tcp', - "IpPermissions.3.FromPort": 443, - "IpPermissions.3.ToPort": 443, - "IpPermissions.3.IpRanges.1.CidrIp" : "0.0.0.0/0" - }, function(r) { - if (!r || !r.return === 'true') { - return cb(createError('failed to create security group', r)); - } - cb(null, groupName); - }); - }); - } else { - // already exists? 
- var m = jsel.match('.securityGroupInfo > .item > .groupName', r); - if (m.length && m[0] === groupName) return cb(null, groupName); - cb(createError('error creating group', r)); - } - }); -}; diff --git a/scripts/deploy/ssh.js b/scripts/deploy/ssh.js deleted file mode 100644 index 290abf1d322745ef8f4fbc17eac90345f289e606..0000000000000000000000000000000000000000 --- a/scripts/deploy/ssh.js +++ /dev/null @@ -1,43 +0,0 @@ -const -child_process = require('child_process'), -temp = require('temp'), -fs = require('fs'); - -const MAX_TRIES = 20; - -exports.copyUpConfig = function(host, config, cb) { - var tries = 0; - temp.open({}, function(err, r) { - fs.writeFileSync(r.path, JSON.stringify(config, null, 4)); - var cmd = 'scp -o "StrictHostKeyChecking no" ' + r.path + ' app@' + host + ":config.json"; - function oneTry() { - child_process.exec(cmd, function(err, r) { - if (err) { - if (++tries > MAX_TRIES) return cb("can't connect via SSH. stupid amazon"); - console.log(" ... nope. not yet. retrying."); - setTimeout(oneTry, 5000); - } else { - cb(); - } - }); - } - oneTry(); - }); -}; - -exports.copySSL = function(host, pub, priv, cb) { - var cmd = 'scp -o "StrictHostKeyChecking no" ' + pub + ' ec2-user@' + host + ":/etc/ssl/certs/hacksign.in.crt"; - child_process.exec(cmd, function(err, r) { - if (err) return cb(err); - var cmd = 'scp -o "StrictHostKeyChecking no" ' + priv + ' ec2-user@' + host + ":/etc/ssl/certs/hacksign.in.key"; - child_process.exec(cmd, function(err, r) { - var cmd = 'ssh -o "StrictHostKeyChecking no" ec2-user@' + host + " 'sudo /etc/init.d/nginx restart'"; - child_process.exec(cmd, cb); - }); - }); -}; - -exports.addSSHPubKey = function(host, pubkey, cb) { - var cmd = 'ssh -o "StrictHostKeyChecking no" ec2-user@' + host + " 'echo \'" + pubkey + "\' >> .ssh/authorized_keys'"; - child_process.exec(cmd, cb); -}; diff --git a/scripts/deploy/vm.js b/scripts/deploy/vm.js deleted file mode 100644 index de38451d5b62993a03080d7d21da0d951d88099d..0000000000000000000000000000000000000000 --- a/scripts/deploy/vm.js +++ /dev/null @@ -1,121 +0,0 @@ -const -aws = require('./aws.js'); -jsel = require('JSONSelect'), -key = require('./key.js'), -sec = require('./sec.js'); - -const BROWSERID_TEMPLATE_IMAGE_ID = 'ami-6ed07107'; - -function extractInstanceDeets(horribleBlob) { - var instance = {}; - ["instanceId", "imageId", "instanceState", "dnsName", "keyName", "instanceType", - "ipAddress"].forEach(function(key) { - if (horribleBlob[key]) instance[key] = horribleBlob[key]; - }); - var name = jsel.match('.tagSet :has(.key:val("Name")) > .value', horribleBlob); - if (name.length) { - instance.fullName = name[0]; - // if this is a 'browserid deployment', we'll only display the hostname chosen by the - // user - var m = /^browserid deployment \((.*)\)$/.exec(instance.fullName); - instance.name = m ? 
m[1] : instance.fullName; - } else { - instance.name = instance.instanceId; - } - return instance; -} - -exports.list = function(cb) { - aws.call('DescribeInstances', {}, function(result) { - var instances = {}; - var i = 1; - jsel.forEach( - '.instancesSet > .item:has(.instanceState .name:val("running"))', - result, function(item) { - var deets = extractInstanceDeets(item); - instances[deets.name || 'unknown ' + i++] = deets; - }); - cb(null, instances); - }); -}; - -exports.destroy = function(name, cb) { - exports.list(function(err, r) { - if (err) return cb('failed to list vms: ' + err); - if (!r[name]) return cb('no such vm'); - - aws.call('TerminateInstances', { - InstanceId: r[name].instanceId - }, function(result) { - try { return cb(result.Errors.Error.Message); } catch(e) {}; - cb(null, r[name]); - }); - }); -}; - -function returnSingleImageInfo(result, cb) { - if (!result) return cb('no results from ec2 api'); - try { return cb(result.Errors.Error.Message); } catch(e) {}; - try { - result = jsel.match('.instancesSet > .item', result)[0]; - cb(null, extractInstanceDeets(result)); - } catch(e) { - return cb("couldn't extract new instance details from ec2 response: " + e); - } -} - -exports.startImage = function(cb) { - key.getName(function(err, keyName) { - if (err) return cb(err); - sec.getName(function(err, groupName) { - if (err) return cb(err); - aws.call('RunInstances', { - ImageId: BROWSERID_TEMPLATE_IMAGE_ID, - KeyName: keyName, - SecurityGroup: groupName, - InstanceType: 't1.micro', - MinCount: 1, - MaxCount: 1 - }, function (result) { - returnSingleImageInfo(result, cb); - }); - }); - }); -}; - -exports.waitForInstance = function(id, cb) { - aws.call('DescribeInstanceStatus', { - InstanceId: id - }, function(r) { - if (!r) return cb('no response from ec2'); - // we're waiting and amazon might not have created the image yet! that's - // not an error, just an api timing quirk - var waiting = jsel.match('.Error .Code:val("InvalidInstanceID.NotFound")', r); - if (waiting.length) { - return setTimeout(function(){ exports.waitForInstance(id, cb); }, 1000); - } - - if (!r.instanceStatusSet) return cb('malformed response from ec2' + JSON.stringify(r, null, 2)); - if (Object.keys(r.instanceStatusSet).length) { - var deets = extractInstanceDeets(r.instanceStatusSet.item); - if (deets && deets.instanceState && deets.instanceState.name === 'running') { - return aws.call('DescribeInstances', { InstanceId: id }, function(result) { - returnSingleImageInfo(result, cb); - }); - } - } - setTimeout(function(){ exports.waitForInstance(id, cb); }, 1000); - }); -}; - -exports.setName = function(id, name, cb) { - aws.call('CreateTags', { - "ResourceId.0": id, - "Tag.0.Key": 'Name', - "Tag.0.Value": name - }, function(result) { - if (result && result.return === 'true') return cb(null); - try { return cb(result.Errors.Error.Message); } catch(e) {}; - return cb('unknown error setting instance name'); - }); -}; diff --git a/scripts/deploy_dev.js b/scripts/deploy_dev.js deleted file mode 100755 index 21c2308170cc8ca5a2ab8a2de76e4653575bdcc6..0000000000000000000000000000000000000000 --- a/scripts/deploy_dev.js +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env node - -/* - * Deploy dev.diresworb.org, for fun and profit. 
- */ - -const -aws = require('./deploy/aws.js'); -path = require('path'); -vm = require('./deploy/vm.js'), -key = require('./deploy/key.js'), -ssh = require('./deploy/ssh.js'), -git = require('./deploy/git.js'), -dns = require('./deploy/dns.js'), -util = require('util'), -events = require('events'), -fs = require('fs'); - -// verify we have files we need - -// a class capable of deploying and emmitting events along the way -function DevDeployer() { - events.EventEmitter.call(this); - - this.sslpub = process.env['DEV_SSL_PUB']; - this.sslpriv = process.env['DEV_SSL_PRIV']; - this.keypairs = []; - if (process.env['ADDITIONAL_KEYPAIRS']) { - this.keypairs = process.env['ADDITIONAL_KEYPAIRS'].split(','); - } - - if (!this.sslpub || !this.sslpriv) { - throw("you must provide ssl cert paths via DEV_SSL_PUB & DEV_SSL_PRIV"); - } - - if (!fs.statSync(this.sslpub).isFile() || !fs.statSync(this.sslpriv).isFile()) { - throw("DEV_SSL_PUB & DEV_SSL_PRIV must be paths to actual files. duh"); - } -} - -util.inherits(DevDeployer, events.EventEmitter); - -DevDeployer.prototype.setup = function(cb) { - var self = this; - git.currentSHA(function(err, r) { - if (err) return cb(err); - self.sha = r; - vm.startImage(function(err, r) { - if (err) return cb(err); - self.emit('progress', "starting new image"); - vm.waitForInstance(r.instanceId, function(err, d) { - if (err) return cb(err); - self.deets = d; - self.emit('progress', "image started"); - vm.setName(r.instanceId, "dev.diresworb.org (" + self.sha + ")", function(err, r) { - if (err) return cb(err); - self.emit('progress', "name set"); - cb(null); - }); - }); - }); - }); -} - -DevDeployer.prototype.configure = function(cb) { - var self = this; - var config = { public_url: "https://dev.diresworb.org" }; - ssh.copyUpConfig(self.deets.ipAddress, config, function (err) { - if (err) return cb(err); - ssh.copySSL(self.deets.ipAddress, self.sslpub, self.sslpriv, function(err) { - if (err) return cb(err); - - // now copy up addtional keypairs - var i = 0; - function copyNext() { - if (i == self.keypairs.length) return cb(null); - ssh.addSSHPubKey(self.deets.ipAddress, self.keypairs[i++], function(err) { - if (err) return cb(err); - self.emit('progress', "key added..."); - copyNext(); - }); - } - copyNext(); - }); - }); -} - -DevDeployer.prototype.pushCode = function(cb) { - var self = this; - git.push(this.deets.ipAddress, function(d) { self.emit('build_output', d); }, cb); -} - -DevDeployer.prototype.updateDNS = function(cb) { - var self = this; - dns.deleteRecord('dev.diresworb.org', function() { - dns.updateRecord('', 'dev.diresworb.org', self.deets.ipAddress, cb); - }); -} - -var deployer = new DevDeployer(); - -deployer.on('progress', function(d) { - console.log("PR: " + d); -}); - -deployer.on('build_output', function(d) { - console.log("BO: " + d); -}); - -function checkerr(err) { - if (err) { - process.stderr.write("fatal error: " + err + "\n"); - process.exit(1); - } -} - -var startTime = new Date(); -deployer.setup(function(err) { - checkerr(err); - deployer.configure(function(err) { - checkerr(err); - deployer.updateDNS(function(err) { - checkerr(err); - deployer.pushCode(function(err) { - checkerr(err); - console.log("dev.diresworb.org (" + deployer.sha + ") deployed to " + - deployer.deets.ipAddress + " in " + - ((new Date() - startTime) / 1000.0).toFixed(2) + "s"); - }); - }); - }); -}); diff --git a/scripts/deploy_server.js b/scripts/deploy_server.js deleted file mode 100755 index 
f38338ee1bba2f6338472d75c2dd3a3385df2a97..0000000000000000000000000000000000000000 --- a/scripts/deploy_server.js +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env node - -const -temp = require('temp'), -path = require('path'), -util = require('util'), -events = require('events'), -git = require('./deploy/git.js'), -https = require('https'), -vm = require('./deploy/vm.js'), -jsel = require('JSONSelect'), -fs = require('fs'), -express = require('express'), -irc = require('irc'); - -console.log("deploy server starting up"); - -// a class capable of deploying and emmitting events along the way -function Deployer() { - events.EventEmitter.call(this); - - // a directory where we'll keep code - this._codeDir = process.env['CODE_DIR'] || temp.mkdirSync(); - console.log("code dir is:", this._codeDir); - var self = this; - - git.init(this._codeDir, function(err) { - if (err) { - console.log("can't init code dir:", err); - process.exit(1); - } - self.emit('ready'); - }); -} - -util.inherits(Deployer, events.EventEmitter); - -Deployer.prototype._getLatestRunningSHA = function(cb) { - var self = this; - - // failure is not fatal. maybe nothing is running? - var fail = function(err) { - self.emit('info', { msg: "can't get current running sha", reason: err }); - cb(null, null); - } - - https.get({ host: 'dev.diresworb.org', path: '/ver.txt' }, function(res) { - var buf = ""; - res.on('data', function (c) { buf += c }); - res.on('end', function() { - try { - var sha = buf.split(' ')[0]; - if (sha.length == 7) { - self.emit('info', 'latest running is ' + sha); - return cb(null, sha); - } - fail('malformed ver.txt: ' + buf); - } catch(e) { - fail(e); - } - }); - }).on('error', function(err) { - fail(err); - }); - -} - -Deployer.prototype._cleanUpOldVMs = function() { - var self = this; - // what's our sha - git.currentSHA(self._codeDir, function(err, latest) { - if (err) return self.emit('info', err); - vm.list(function(err, r) { - if (err) return self.emit('info', err); - // only check the vms that have 'dev.diresworb.org' as a name - jsel.forEach("object:has(:root > .name:contains(?))", [ "dev.diresworb.org" ], r, function(o) { - // don't delete the current one - if (o.name.indexOf(latest) == -1) { - self.emit('info', 'decommissioning VM: ' + o.name + ' - ' + o.instanceId); - vm.destroy(o.name, function(err, r) { - if (err) self.emit('info', 'decomissioning failed: ' + err); - else self.emit('info', 'decomissioning succeeded of ' + r); - }) - } - }); - }); - }); -} - -Deployer.prototype._deployNewCode = function(cb) { - var self = this; - - function splitAndEmit(chunk) { - if (chunk) chunk = chunk.toString(); - if (typeof chunk === 'string') { - chunk.split('\n').forEach(function (line) { - line = line.trim(); - if (line.length) self.emit('progress', line); - }); - } - } - - var npmInstall = spawn('npm', [ 'install' ], { cwd: self._codeDir }); - - npmInstall.stdout.on('data', splitAndEmit); - npmInstall.stderr.on('data', splitAndEmit); - - npmInstall.on('exit', function(code, signal) { - if (code != 0) { - self.emit('error', "can't npm install to prepare to run deploy_dev"); - return; - } - var p = spawn('scripts/deploy_dev.js', [], { cwd: self._codeDir }); - - p.stdout.on('data', splitAndEmit); - p.stderr.on('data', splitAndEmit); - - p.on('exit', function(code, signal) { - return cb(code != 0); - }); - }); -}; - -Deployer.prototype._pullLatest = function(cb) { - var self = this; - git.pull(this._codeDir, 'git://github.com/mozilla/browserid', 'dev', function(l) { - self.emit('progress', l); - }, 
function(err) { - if (err) return cb(err); - git.currentSHA(self._codeDir, function(err, latest) { - if (err) return cb(err); - self.emit('info', 'latest available sha is ' + latest); - self._getLatestRunningSHA(function(err, running) { - if (latest != running) { - self.emit('deployment_begins', { - sha: latest, - }); - var startTime = new Date(); - - self._deployNewCode(function(err, res) { - if (err) return cb(err); - // deployment is complete! - self.emit('deployment_complete', { - sha: latest, - time: (new Date() - startTime) - }); - // finally, let's clean up old servers - self._cleanUpOldVMs(); - cb(null, null); - }); - } else { - self.emit('info', 'up to date'); - cb(null, null); - } - }); - }); - }); -} - -// may be invoked any time we suspect updates have occured to re-deploy -// if needed -Deployer.prototype.checkForUpdates = function() { - var self = this; - - if (this._busy) return; - - this._busy = true; - self.emit('info', 'checking for updates'); - - self._pullLatest(function(err, sha) { - if (err) self.emit('error', err); - self._busy = false; - }); -} - -var deployer = new Deployer(); - -var currentLogFile = null; -// a directory where we'll keep deployment logs -var deployLogDir = process.env['DEPLOY_LOG_DIR'] || temp.mkdirSync(); - -var deployingSHA = null; - -console.log("deployment log dir is:", deployLogDir); - -[ 'info', 'ready', 'error', 'deployment_begins', 'deployment_complete', 'progress' ].forEach(function(evName) { - deployer.on(evName, function(data) { - if (typeof data != 'string') data = JSON.stringify(data, null, 2); - var msg = evName + ": " + data; - console.log(msg) - if (currentLogFile) currentLogFile.write(msg + "\n"); - }); -}); - -// irc integration! -var ircClient = null; -const ircChannel = '#identity'; -function ircSend(msg) { - if (!ircClient) { - ircClient = new irc.Client('irc.mozilla.org', 'browserid_deployer', { - channels: [ircChannel] - }); - ircClient.on('error', function(e) { - console.log('irc error: ', e); - }); - ircClient.once('join' + ircChannel, function(e) { - ircClient.say(ircChannel, msg); - }); - } else { - ircClient.say(ircChannel, msg); - } -} - -function ircDisconnect() { - setTimeout(function() { - if (ircClient) { - ircClient.disconnect(); - ircClient = null; - } - }, 1000); -} - - -// now when deployment begins, we log all events -deployer.on('deployment_begins', function(r) { - currentLogFile = fs.createWriteStream(path.join(deployLogDir, r.sha + ".txt")); - currentLogFile.write("deployment of " + r.sha + " begins\n"); - deployingSHA = r.sha; - ircSend("deploying " + r.sha + " - status https://deployer.hacksign.in/" + r.sha + ".txt"); -}); - -function closeLogFile() { - if (currentLogFile) { - currentLogFile.end(); - currentLogFile = null; - } -} - -deployer.on('deployment_complete', function(r) { - ircSend("deployment of " + deployingSHA + " completed successfully in " + - (r.time / 1000.0).toFixed(2) + "s"); - ircDisconnect(); - - closeLogFile(); - deployingSHA = null; - - // always check to see if we should try another deployment after one succeeds to handle rapid fire - // commits - deployer.checkForUpdates(); -}); - -deployer.on('error', function(r) { - ircSend("deployment of " + deployingSHA + " failed. check logs for deets"); - ircDisconnect(); - - closeLogFile(); - deployingSHA = null; - - // on error, try again in 2 minutes - setTimeout(function () { - deployer.checkForUpdates(); - }, 2 * 60 * 1000); -}); - - -// we check every 3 minutes no mattah what. 
(checks are cheap, github webhooks are flakey) -setInterval(function () { - deployer.checkForUpdates(); -}, (1000 * 60 * 3)); - -// check for updates at startup -deployer.on('ready', function() { - deployer.checkForUpdates(); - - var app = express.createServer(); - - app.get('/check', function(req, res) { - deployer.checkForUpdates(); - res.send('ok'); - }); - - app.get('/', function(req, res) { - var what = "idle"; - if (deployingSHA) what = "deploying " + deployingSHA; - res.send(what); - }); - - app.use(express.static(deployLogDir)); - - app.listen(process.env['PORT'] || 8080, function() { - console.log("deploy server bound"); - }); -});
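
The new `scripts/deploy.js` above is a thin shim that turns a handful of environment variables (AWS_ID, AWS_SECRET, ZERIGO_DNS_KEY or PERSONA_DEPLOY_DNS_KEY, PERSONA_SSL_PUB, PERSONA_SSL_PRIV, PERSONA_EPHEMERAL_CONFIG) into an awsbox command line. Below is a minimal, hypothetical sketch (not part of this diff) of driving that wrapper from Node; the secrets file names (cert.pem, key.pem, smtp.json) are taken from the sample output earlier in this patch and may differ in your bundle.

    // deploy_example.js -- hypothetical helper, not included in this diff.
    // Run from the repository root, with AWS_ID and AWS_SECRET already set.
    var child_process = require('child_process'),
    path = require('path');

    function deploy(name, secretsDir, cb) {
      // copy the current environment so PATH, HOME, PWD, AWS_* are preserved
      var env = {};
      Object.keys(process.env).forEach(function(k) { env[k] = process.env[k]; });

      // optional extras read by scripts/deploy.js in this patch
      env.PERSONA_SSL_PUB = path.join(secretsDir, 'cert.pem');           // assumed filename
      env.PERSONA_SSL_PRIV = path.join(secretsDir, 'key.pem');           // assumed filename
      env.PERSONA_EPHEMERAL_CONFIG = path.join(secretsDir, 'smtp.json'); // assumed filename

      // invoke the wrapper exactly as you would from a shell, streaming its output
      var p = child_process.spawn('node', ['scripts/deploy.js', 'deploy', name], { env: env });
      p.stdout.pipe(process.stdout);
      p.stderr.pipe(process.stderr);
      p.on('exit', function(code) {
        cb(code === 0 ? null : 'deploy exited with code ' + code);
      });
    }

    // usage: node deploy_example.js some_name_i_chose
    deploy(process.argv[2], path.join(process.env.HOME, '.persona_secrets'), function(err) {
      if (err) console.error(err);
    });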