@@ -20,6 +20,8 @@
Skipping contrib/automation/hgautomation/ssh.py it has no-che?k-code (glob)
Skipping contrib/automation/hgautomation/windows.py it has no-che?k-code (glob)
Skipping contrib/automation/hgautomation/winrm.py it has no-che?k-code (glob)
+ Skipping contrib/ci/hgci/cli.py it has no-che?k-code (glob)
+ Skipping contrib/ci/hgci/try_server.py it has no-che?k-code (glob)
Skipping contrib/ci/lambda_functions/ci.py it has no-che?k-code (glob)
Skipping contrib/ci/lambda_functions/web.py it has no-che?k-code (glob)
Skipping contrib/packaging/hgpackaging/downloads.py it has no-che?k-code (glob)
@@ -13,6 +13,12 @@
}
data "aws_iam_policy_document" "ci_worker" {
+ # Allow CI worker EC2 instances to fetch try bundles from S3.
+ statement {
+ effect = "Allow"
+ actions = ["s3:GetObject"]
+ resources = ["${aws_s3_bucket.mercurial_try_bundles.arn}/*"]
+ }
# Allow CI worker EC2 instances to write artifacts to S3.
statement {
effect = "Allow"
new file mode 100644
@@ -0,0 +1,108 @@
+# Holds user-uploaded Mercurial bundles to trigger CI on.
+resource "aws_s3_bucket" "mercurial_try_bundles" {
+ bucket = "mercurial-try-bundles"
+ region = "us-west-2"
+ acl = "private"
+
+ lifecycle_rule {
+ id = "Purge old objects"
+ enabled = true
+ expiration {
+ days = 30
+ }
+ }
+}
+
+# Logging for Lambda function.
+resource "aws_cloudwatch_log_group" "lambda_ci_try_server_upload" {
+ name = "/aws/lambda/${aws_lambda_function.ci_try_server_upload.function_name}"
+ # Longer than other log groups for auditing purposes.
+ retention_in_days = 30
+}
+
+# Role for Lambda function.
+resource "aws_iam_role" "lambda_ci_try_server_upload" {
+ name = "lambda-ci-try-server-upload"
+ description = "For Lambaa function handling Try server uploads"
+ assume_role_policy = data.aws_iam_policy_document.assume_role_lambda.json
+}
+
+# The Lambda function which handles bundle uploads and triggers jobs.
+resource "aws_lambda_function" "ci_try_server_upload" {
+ function_name = "ci-try-server-upload"
+ description = "Handles uploading of bundles to the Try server"
+ filename = data.archive_file.lambda_ci.output_path
+ handler = "ci.handle_try_server_upload"
+ source_code_hash = data.archive_file.lambda_ci.output_base64sha256
+ runtime = "python3.7"
+ timeout = 60
+ role = aws_iam_role.lambda_ci_try_server_upload.arn
+ environment {
+ variables = {
+ DYNAMODB_PUSH_TABLE = aws_dynamodb_table.ci_push.name
+ S3_TRY_BUNDLES_BUCKET = aws_s3_bucket.mercurial_try_bundles.bucket
+ WEB_URL = "https://${aws_api_gateway_domain_name.ci_web.domain_name}/"
+ }
+ }
+}
+
+data "aws_iam_policy_document" "ci_try_server_upload" {
+  # Allow Lambda function to write CloudWatch logs.
+ statement {
+ effect = "Allow"
+ actions = [
+ "logs:CreateLogGroup",
+ "logs:CreateLogStream",
+ "logs:PutLogEvents",
+ ]
+ resources = [aws_cloudwatch_log_group.lambda_ci_try_server_upload.arn]
+ }
+ # Enable writing bundle to S3 bucket.
+ statement {
+ effect = "Allow"
+ actions = [
+ "s3:PutObject",
+ "s3:PutObjectAcl",
+ ]
+ resources = ["${aws_s3_bucket.mercurial_try_bundles.arn}/*"]
+ }
+ # Enable querying and inserting pushes into DynamoDB.
+ statement {
+ effect = "Allow"
+ actions = [
+ "dynamodb:PutItem",
+ "dynamodb:Query",
+ ]
+ resources = [
+ aws_dynamodb_table.ci_push.arn,
+ "${aws_dynamodb_table.ci_push.arn}/*",
+ ]
+ }
+}
+
+resource "aws_iam_role_policy" "lambda_ci_try_server_upload" {
+ role = aws_iam_role.lambda_ci_try_server_upload.name
+ name = aws_iam_role.lambda_ci_try_server_upload.name
+ policy = data.aws_iam_policy_document.ci_try_server_upload.json
+}
+
+# An IAM group where membership grants permission to invoke the
+# Lambda function.
+resource "aws_iam_group" "access_to_try_upload" {
+ name = "access-to-try-upload"
+}
+
+data "aws_iam_policy_document" "access_to_try_upload" {
+ # Allow invoking the try upload Lambda function.
+ statement {
+ effect = "Allow"
+ actions = ["lambda:InvokeFunction"]
+ resources = [aws_lambda_function.ci_try_server_upload.arn]
+ }
+}
+
+resource "aws_iam_group_policy" "access_to_try_upload" {
+ name = aws_iam_group.access_to_try_upload.name
+ group = aws_iam_group.access_to_try_upload.name
+ policy = data.aws_iam_policy_document.access_to_try_upload.json
+}
@@ -25,10 +25,23 @@
name = "push_id"
type = "S"
}
+ attribute {
+ name = "node"
+ type = "S"
+ }
hash_key = "repo"
range_key = "push_id"
+ # This allows us to easily query for whether a push already exists for
+ # a specific node in a repository.
+ global_secondary_index {
+ name = "repo-node"
+ hash_key = "repo"
+ range_key = "node"
+ projection_type = "KEYS_ONLY"
+ }
+
stream_enabled = true
stream_view_type = "NEW_AND_OLD_IMAGES"
}
@@ -71,7 +71,6 @@
]
resources = ["*"]
}
- # Allow querying and recording job state in DynamoDB.
statement {
effect = "Allow"
actions = [
new file mode 100644
@@ -0,0 +1,13 @@
+# This file defines AWS account users.
+
+resource "aws_iam_user" "gps" {
+ name = "gps"
+}
+
+resource "aws_iam_group_membership" "access_to_try_upload" {
+ name = "Membership to the access-to-try group to allow Try Server access"
+ group = aws_iam_group.access_to_try_upload.name
+ users = [
+ aws_iam_user.gps.name,
+ ]
+}
new file mode 100644
@@ -0,0 +1 @@
+boto3
new file mode 100644
@@ -0,0 +1,38 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile --generate-hashes --output-file=contrib/ci/requirements.txt contrib/ci/requirements.txt.in
+#
+boto3==1.9.238 \
+ --hash=sha256:2fc1c407a5ab08cfcf54eb4171d85c523bd27019ab890de257d018af2770f71d \
+ --hash=sha256:c215cf2c8e5e7b28ae7544b1cbdbc3216bef983d7adb8b701a64f9b893e0320b
+botocore==1.12.238 \
+ --hash=sha256:1ca993f0dc70591e0fca6cf3837ee9be52fd4fbbf1aa96ba1d4a860b41f676b7 \
+ --hash=sha256:6ec3297b87d3e2c4d88b009f91061aaecdb2ceef6d9be9386394571353909adb \
+ # via boto3, s3transfer
+docutils==0.15.2 \
+ --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
+ --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
+ --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99 \
+ # via botocore
+jmespath==0.9.4 \
+ --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \
+ --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \
+ # via boto3, botocore
+python-dateutil==2.8.0 \
+ --hash=sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb \
+ --hash=sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e \
+ # via botocore
+s3transfer==0.2.1 \
+ --hash=sha256:6efc926738a3cd576c2a79725fed9afde92378aa5c6a957e3af010cb019fac9d \
+ --hash=sha256:b780f2411b824cb541dbcd2c713d0cb61c7d1bcadae204cdddda2b35cef493ba \
+ # via boto3
+six==1.12.0 \
+ --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
+ --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
+ # via python-dateutil
+urllib3==1.25.6 \
+ --hash=sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398 \
+ --hash=sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86 \
+ # via botocore
@@ -66,11 +66,21 @@
'</style>',
]
- for repo_entry in repo_poll_table.scan(Select='ALL_ATTRIBUTES')['Items']:
+ repos = repo_poll_table.scan(Select='ALL_ATTRIBUTES')['Items']
+
+ # Try repository is virtual. Add it manually.
+ repos.append({
+ 'repo': 'try',
+ })
+
+ for repo_entry in repos:
repo_name = repo_entry['repo']
- repo_url = repo_entry['repo_url']
+ repo_url = repo_entry.get('repo_url', None)
- html.append('<h1><a href="%s">%s</a></h1>' % (e(repo_url, quote=True), e(repo_name)))
+ if repo_url:
+ html.append('<h1><a href="%s">%s</a></h1>' % (e(repo_url, quote=True), e(repo_name)))
+ else:
+ html.append('<h1>%s</h1>' % e(repo_name))
res = push_table.query(
KeyConditionExpression=Key('repo').eq(repo_name),
@@ -401,9 +411,14 @@
def push_info(push, repo_url):
cset_url = '%s/rev/%s' % (repo_url, push['node'])
+ if repo_url:
+ cset_entry = '<a href="%s">%s</a>' % (
+ e(cset_url, quote=True), e(push['node']))
+ else:
+ cset_entry = e(push['node'])
+
return ''.join([
- '<h2>Changeset <span class="mono"><a href="%s">%s</a></span></h2>' % (
- e(cset_url, quote=True), e(push['node'])),
+ '<h2>Changeset <span class="mono">%s</span></h2>' % cset_entry,
'<p>branch: <span class="mono">%s</span></p>' % e(push['branch']),
'<p>author: <span class="mono">%s</span></p>' % e(push['user']),
'<p>description: <span class="mono">%s</span></p>' % e(push['message'].splitlines()[0]),
@@ -13,8 +13,12 @@
import os
import time
import urllib.request
+import uuid
import boto3
+from boto3.dynamodb.conditions import (
+ Key,
+)
def handle_poll_repo(event, context):
@@ -40,7 +44,11 @@
continue
record = record['dynamodb']['NewImage']
- schedule_ci(record['repo_url']['S'], record['repo']['S'], record['node']['S'])
+
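+        # Try pushes record a bundle_url attribute; polled pushes do not.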
+ bundle_url = record['bundle_url']['S'] if 'bundle_url' in record else None
+
+ schedule_ci(record['repo_url']['S'], record['repo']['S'],
+ record['node']['S'], bundle_url=bundle_url)
def handle_pending_job(event, context):
@@ -99,6 +107,42 @@
react_to_instance_state_change(job_table, instance, state)
+def handle_try_server_upload(event, context):
+ """Handler for receiving a Try Server upload request.
+
+ This is invoked by end-users to upload a bundle and trigger jobs on it.
+ """
+ # Since the function is invoked directly, any print()ed output can be
+ # seen by the end-user. So be careful about what is logged.
+ messages = []
+
+ for a in ('bundle', 'node', 'branch', 'user', 'message'):
+ if a not in event:
+ messages.append('error: missing argument "%s"' % a)
+
+ if messages:
+ return messages
+
+ s3 = boto3.resource('s3')
+ dynamodb = boto3.resource('dynamodb')
+
+ bundle_bucket = s3.Bucket(os.environ['S3_TRY_BUNDLES_BUCKET'])
+ push_table = dynamodb.Table(os.environ['DYNAMODB_PUSH_TABLE'])
+
+    messages = process_try_bundle(
+ bundle_bucket,
+ push_table,
+ base64.b64decode(event['bundle']),
+ event['node'],
+ event['branch'],
+ event['user'],
+ event['message'],
+ os.environ['WEB_URL'],
+ )
+
+ return messages
+
+
def next_build_number(job_table, repo, node, job_name):
"""Find the next available build number for a job given its unique name."""
@@ -190,8 +234,9 @@
poll_table.put_item(Item=new_state)
-def schedule_ci(repo_url, repo, node):
- print('scheduling CI for revision %s on %s' % (node, repo_url))
+def schedule_ci(repo_url, repo, node, bundle_url=None):
+ """Schedule CI from a record in the push table."""
+ print('scheduling CI for revision %s on %s' % (node, repo))
dynamodb = boto3.resource('dynamodb')
ec2 = boto3.resource('ec2')
s3 = boto3.resource('s3')
@@ -203,15 +248,25 @@
# TODO we should build AMIs using in-repo code so all jobs are using an
# appropriate AMI for the revision.
+ # TODO we should store metadata in the push record that allows specifying
+ # which jobs to run.
for image in ec2.images.filter(Owners=['self']):
if image.name == 'hg-linux-dev-debian9':
- schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo, node, image, 'debian9')
+ schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo,
+ node, image, 'debian9',
+ bundle_url=bundle_url)
elif image.name == 'hg-linux-dev-debian10':
- schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo, node, image, 'debian10')
+ schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo,
+ node, image, 'debian10',
+ bundle_url=bundle_url)
elif image.name == 'hg-linux-dev-ubuntu18.04':
- schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo, node, image, 'ubuntu18.04')
+ schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo,
+ node, image, 'ubuntu18.04',
+ bundle_url=bundle_url)
elif image.name == 'hg-linux-dev-ubuntu19.04':
- schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo, node, image, 'ubuntu19.04')
+ schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo,
+ node, image, 'ubuntu19.04',
+ bundle_url=bundle_url)
RUN_TESTS_LINUX = '''
@@ -219,20 +274,38 @@
HG=/hgdev/venv-bootstrap/bin/hg
+REPO_URL=$1
+NODE=$2
+PYTHON=$3
+S3_URL=$4
+BUNDLE_URL=$5
+
cd /hgwork/src
-${HG} pull -r $2 $1
-${HG} log -r $2
-${HG} up $2
+if [ -z "${BUNDLE_URL}" ]; then
+ echo "pulling $NODE from $REPO_URL"
+ ${HG} pull -r $NODE $REPO_URL
+else
+ echo "pulling $REPO_URL"
+ ${HG} pull $REPO_URL
+ echo "fetching bundle from $BUNDLE_URL"
+ aws s3 cp $BUNDLE_URL bundle.hg
+ echo "applying bundle"
+ ${HG} unbundle bundle.hg
+fi
+
+# Bail immediately if we can't find the requested revision.
+${HG} log -r $NODE || exit 1
+${HG} up $NODE || exit 1
export TMPDIR=/hgwork/tmp
cd tests
-time $3 ./run-tests.py --json 2>&1 | tee output.log
+time $PYTHON ./run-tests.py --json 2>&1 | tee output.log
-aws s3 cp --content-type text/plain --acl public-read output.log $4/output.log
+aws s3 cp --content-type text/plain --acl public-read output.log $S3_URL/output.log
# The JSON file has a prefix to allow loading in web browsers.
tail -c +13 report.json > report-truncated.json
-aws s3 cp --content-type application/json --acl public-read report-truncated.json $4/report.json
+aws s3 cp --content-type application/json --acl public-read report-truncated.json $S3_URL/report.json
'''.lstrip()
@@ -248,6 +321,7 @@
# TAG build_number {build_number}
# TAG s3_bucket {s3_bucket}
# TAG s3_prefix {s3_prefix}
+# TAG bundle_url {bundle_url}
repo_update: false
repo_upgrade: false
@@ -264,7 +338,7 @@
- mkdir /hgwork/tmp
- chown -R hg:hg /hgwork
- sudo -u hg -g hg rsync -a /hgdev/src /hgwork/
- - sudo -u hg -g hg /run-tests-linux {repo_url} {node} {python} s3://{s3_bucket}/{s3_prefix} 2>&1 | tee /ci.log
+ - sudo -u hg -g hg /run-tests-linux {repo_url} {node} {python} s3://{s3_bucket}/{s3_prefix} '{bundle_url}' 2>&1 | tee /ci.log
- aws s3 cp --content-type text/plain --acl public-read /ci.log s3://{s3_bucket}/{s3_prefix}/ci.log
- echo done > done
- aws s3 cp --content-type text/plain --acl public-read done s3://{s3_bucket}/{s3_prefix}/done
@@ -276,7 +350,8 @@
'''.lstrip()
-def schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo, node, image, os_prefix):
+def schedule_linux_ci(job_table, sqs, sqs_url, bucket, repo_url, repo,
+ node, image, os_prefix, bundle_url=None):
block_device_mappings = [
{
'DeviceName': image.block_device_mappings[0]['DeviceName'],
@@ -322,6 +397,7 @@
run_tests_linux_b64=run_tests_linux_b64,
s3_bucket=bucket.name,
s3_prefix=bucket_prefix,
+ bundle_url=bundle_url or '',
)
config = {
@@ -403,7 +479,14 @@
continue
kv = line[len('# TAG '):].strip()
- name, value = kv.split(' ', 1)
+ parts = kv.split(' ', 1)
+
+    # Ignore tags with an empty value.
+ if len(parts) == 1:
+ continue
+
+ name, value = parts
+
tags[name] = value
set_tags.append({
'Key': name,
@@ -564,3 +647,62 @@
v[kk] = decimal.Decimal(v[kk])
batch.put_item(Item=v)
+
+
+def process_try_bundle(bundle_bucket, push_table, bundle, node, branch, user,
+ message, web_url):
+ """Process an incoming Try bundle."""
+ messages = []
+
+ now = datetime.datetime.utcnow().isoformat()
+
+ messages.append('processing bundle of size %d bytes' % len(bundle))
+
+ # Verify we don't already have a Try push for this node. We may need
+ # to reconsider this logic once we can cherry-pick which jobs get
+ # scheduled...
+ res = push_table.query(
+ IndexName='repo-node',
+ KeyConditionExpression=Key('repo').eq('try') & Key('node').eq(node),
+ Select='ALL_PROJECTED_ATTRIBUTES',
+ )
+ if res.get('Items'):
+ messages.append('%s previously submitted; did you forget to amend?' % node)
+ messages.append('rejecting Try submission')
+ return messages
+
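+    # A random UUID keeps bundle keys unique across submissions.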
+ key = 'bundle/%s.hg' % uuid.uuid4()
+
+ messages.append('uploading bundle to S3: %s' % key)
+ bundle_bucket.put_object(
+ Key=key,
+ Body=bundle,
+ ContentType='application/octet-stream',
+ ACL='private',
+ StorageClass='STANDARD_IA',
+ )
+
+ bundle_url = 's3://%s/%s' % (bundle_bucket.name, key)
+
+ # We record this as a "push" in DynamoDB. The creation of this entry
+ # will result in CI being scheduled.
+ messages.append('recording push in DynamoDB')
+ push_table.put_item(Item={
+ 'repo': 'try',
+ 'push_id': '%s-try' % now,
+ # Try pushes are always based on the published repository history.
+ 'repo_url': 'https://www.mercurial-scm.org/repo/hg',
+ 'bundle_url': bundle_url,
+ 'repo_name': 'try',
+ 'poll_rev': 'try',
+ 'push_date': now,
+ 'node': node,
+ 'branch': branch,
+ 'user': user,
+ 'message': message,
+ })
+
+ messages.append('push recorded; jobs should start running automatically')
+ messages.append('see results at %s' % web_url)
+
+ return messages
new file mode 100644
@@ -0,0 +1,93 @@
+# try_server.py - Interact with Try server
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code because Python 3 native.
+
+import base64
+import json
+import os
+import subprocess
+import tempfile
+
+import boto3
+
+LAMBDA_FUNCTION = 'ci-try-server-upload'
+
+
+def trigger_try(rev='.'):
+ """Trigger a new Try run."""
+ lambda_client = boto3.client('lambda', region_name='us-west-2')
+
+ cset, bundle = generate_bundle(rev=rev)
+
+ payload = {
+ 'bundle': base64.b64encode(bundle).decode('utf-8'),
+ 'node': cset['node'],
+ 'branch': cset['branch'],
+ 'user': cset['user'],
+ 'message': cset['desc'],
+ }
+
+ print('resolved revision:')
+ print('node: %s' % cset['node'])
+ print('branch: %s' % cset['branch'])
+ print('user: %s' % cset['user'])
+ print('desc: %s' % cset['desc'].splitlines()[0])
+ print()
+
+ print('sending to Try...')
+ res = lambda_client.invoke(
+ FunctionName=LAMBDA_FUNCTION,
+ InvocationType='RequestResponse',
+ Payload=json.dumps(payload).encode('utf-8'),
+ )
+
+ body = json.load(res['Payload'])
+ for message in body:
+ print('remote: %s' % message)
+
+
+def generate_bundle(rev='.'):
+ """Generate a bundle suitable for use by the Try service.
+
+ Returns a tuple of revision metadata and raw Mercurial bundle data.
+ """
+    # `hg bundle` doesn't support streaming to stdout, so we use a
+    # temporary file.
+ path = None
+ try:
+ fd, path = tempfile.mkstemp(prefix='hg-bundle-', suffix='.hg')
+ os.close(fd)
+
+ args = [
+ 'hg', 'bundle',
+ '--type', 'gzip-v2',
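+            # Only draft changesets are bundled; the server pulls the
+            # public history from the canonical repository.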
+ '--base', 'public()',
+ '--rev', rev,
+ path,
+ ]
+
+ print('generating bundle...')
+ subprocess.run(args, check=True)
+
+ with open(path, 'rb') as fh:
+ bundle_data = fh.read()
+
+ finally:
+ if path:
+ os.unlink(path)
+
+ args = [
+ 'hg', 'log',
+ '-r', rev,
+        # The payload is sent as JSON, which must be UTF-8, so use the
+        # json template to get properly escaped output.
+ '-T', 'json',
+ ]
+ res = subprocess.run(args, check=True, capture_output=True)
+ return json.loads(res.stdout)[0], bundle_data
+
new file mode 100644
@@ -0,0 +1,53 @@
+# cli.py - Command line interface for CI
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code because Python 3 native.
+
+import argparse
+import os
+import pathlib
+
+from . import (
+ try_server,
+)
+
+SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent
+
+
+def run_try(rev):
+ try_server.trigger_try(rev=rev)
+
+
+def get_parser():
+ parser = argparse.ArgumentParser()
+
+ subparsers = parser.add_subparsers()
+
+ sp = subparsers.add_parser(
+ 'try',
+ help='Run CI automation against a custom changeset'
+ )
+ sp.add_argument('-r', '--rev',
+ default='.',
+ help='Revision to run CI on')
+ sp.set_defaults(func=run_try)
+
+ return parser
+
+
+def main():
+ parser = get_parser()
+ args = parser.parse_args()
+
+ if not hasattr(args, 'func'):
+ parser.print_help()
+ return
+
+ kwargs = dict(vars(args))
+ del kwargs['func']
+
+ args.func(**kwargs)
new file mode 100644
new file mode 100755
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+#
+# ci.py - Interact with Mercurial CI
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+import pathlib
+import subprocess
+import sys
+import venv
+
+
+HERE = pathlib.Path(os.path.abspath(__file__)).parent
+REQUIREMENTS_TXT = HERE / 'requirements.txt'
+SOURCE_DIR = HERE.parent.parent
+VENV = SOURCE_DIR / 'build' / 'venv-ci'
+
+
+def bootstrap():
+ venv_created = not VENV.exists()
+
+ VENV.parent.mkdir(exist_ok=True)
+
+ venv.create(VENV, with_pip=True)
+
+ if os.name == 'nt':
+ venv_bin = VENV / 'Scripts'
+ pip = venv_bin / 'pip.exe'
+ python = venv_bin / 'python.exe'
+ else:
+ venv_bin = VENV / 'bin'
+ pip = venv_bin / 'pip'
+ python = venv_bin / 'python'
+
+ args = [str(pip), 'install', '-r', str(REQUIREMENTS_TXT),
+ '--disable-pip-version-check']
+
+ if not venv_created:
+ args.append('-q')
+
+ subprocess.run(args, check=True)
+
+ os.environ['HGCI_BOOTSTRAPPED'] = '1'
+ os.environ['PATH'] = '%s%s%s' % (
+ venv_bin, os.pathsep, os.environ['PATH'])
+
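+    # Re-invoke this script with the venv's Python now that it is ready.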
+ subprocess.run([str(python), __file__] + sys.argv[1:], check=True)
+
+
+def run():
+ import hgci.cli as cli
+ cli.main()
+
+
+if __name__ == '__main__':
+ try:
+ if 'HGCI_BOOTSTRAPPED' not in os.environ:
+ bootstrap()
+ else:
+ run()
+ except subprocess.CalledProcessError as e:
+ sys.exit(e.returncode)
+ except KeyboardInterrupt:
+ sys.exit(1)
@@ -4,6 +4,54 @@
This directory defines a CI system for the Mercurial Project.
+Try Server
+==========
+
+The CI system features a *Try Server* which allows you to submit
+arbitrary changesets for evaluation. This works by sending a
+Mercurial bundle to a remote server, which ingests it into the CI
+system and schedules it for execution.
+
+Registering
+-----------
+
+Because the *Try Server* is effectively remote code execution as a
+service, users must register before using it and all requests must
+be authenticated.
+
+To register a new account, you will need to add the requested user
+to the ``terraform/iam_users.tf`` file. Create a new
+``aws_iam_user`` resource representing the new user and add this
+user to the ``access_to_try_upload`` ``aws_iam_group_membership``
+resource also in that file.
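+
+For example, to register a hypothetical user ``newuser``::
+
+  resource "aws_iam_user" "newuser" {
+    name = "newuser"
+  }
+
+Then add ``aws_iam_user.newuser.name`` to the ``users`` list of the
+``access_to_try_upload`` ``aws_iam_group_membership`` resource.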
+
+This change will need to be submitted for review and applied by an
+admin of the CI system.
+
+TODO document how to configure authentication credentials
+
+Using
+-----
+
+Once you have your AWS credentials configured, using the Try server
+is simple::
+
+ $ contrib/ci/ci.py try
+ generating bundle...
+ 37 changesets found
+ resolved revision:
+ node: 675ee4827c97bc85ae519b37a5be6ebc95c17b80
+ branch: default
+ user: Gregory Szorc <gregory.szorc@gmail.com>
+ desc: ci: implement a "try server"
+
+ sending to Try...
+ remote: processing bundle of size 49900 bytes
+ remote: uploading bundle to S3: bundle/cf667fe0-f8ce-44db-978e-7fd36756aa86.hg
+ remote: recording push in DynamoDB
+ remote: push recorded; jobs should start running automatically
+ remote: see results at https://ci.hg.gregoryszorc.com/
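+
+To submit a revision other than the working directory parent, pass
+``-r``, e.g. ``contrib/ci/ci.py try -r REV``.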
+
Architecture
============
@@ -126,6 +174,20 @@
The Terraform code for this component lives in ``web.tf``.
+Try Server
+----------
+
+The *try server* component provides a mechanism to trigger CI on arbitrary
+diffs. It allows Mercurial developers to test ad-hoc changes by uploading
+Mercurial bundles to the CI system.
+
+The *try server* works by exposing a Lambda function to privileged
+users. The Lambda function is invoked with a Mercurial bundle and metadata
+describing the changeset it contains. The function then stores the bundle in an
+S3 bucket and registers jobs to run against the uploaded bundle.
+
+The Terraform code for this component lives in ``try_server.tf``.
+
AWS Account Management
======================
@@ -204,13 +266,14 @@
We have no mechanism to retrigger a job. This requires some form of
authentication to prevent abuse.
-We have no mechanism to trigger CI on arbitrary diffs. We would like
-to provide some kind of *try server* where you can submit a diff and
-the system builds it. Again, this requires some form of authentication.
-
We have no mechanism to choose which jobs to execute. We probably want
to build this because there is no need to execute all jobs all the time.
+The association between jobs, job results, and pushes is a bit wonky.
+Things can get confused if there are multiple pushes for the same
+changeset. We may need to change the primary key of the DynamoDB tables
+to fix this.
+
Development Workflow
====================