diff options
Diffstat (limited to 'contrib/cloud')
-rwxr-xr-x  contrib/cloud/aws-import     67
-rwxr-xr-x  contrib/cloud/gce-import    167
-rwxr-xr-x  contrib/cloud/gce-int13con  146
3 files changed, 366 insertions, 14 deletions
diff --git a/contrib/cloud/aws-import b/contrib/cloud/aws-import index ace005870..77c0fd0f7 100755 --- a/contrib/cloud/aws-import +++ b/contrib/cloud/aws-import @@ -22,11 +22,12 @@ def detect_architecture(image): return 'x86_64' -def create_snapshot(region, description, image): +def create_snapshot(region, description, image, tags): """Create an EBS snapshot""" client = boto3.client('ebs', region_name=region) snapshot = client.start_snapshot(VolumeSize=1, - Description=description) + Description=description, + Tags=tags) snapshot_id = snapshot['SnapshotId'] with open(image, 'rb') as fh: for block in count(): @@ -46,21 +47,42 @@ def create_snapshot(region, description, image): return snapshot_id -def import_image(region, name, architecture, image, public, overwrite): - """Import an AMI image""" +def delete_images(region, filters, retain): client = boto3.client('ec2', region_name=region) resource = boto3.resource('ec2', region_name=region) - description = '%s (%s)' % (name, architecture) - images = client.describe_images(Filters=[{'Name': 'name', - 'Values': [description]}]) - if overwrite and images['Images']: - images = images['Images'][0] - image_id = images['ImageId'] - snapshot_id = images['BlockDeviceMappings'][0]['Ebs']['SnapshotId'] + images = client.describe_images(Owners=['self'], Filters=filters) + old_images = sorted(images['Images'], key=lambda x: x['CreationDate']) + if retain > 0: + old_images = old_images[:-retain] + for image in old_images: + image_id = image['ImageId'] + snapshot_id = image['BlockDeviceMappings'][0]['Ebs']['SnapshotId'] resource.Image(image_id).deregister() resource.Snapshot(snapshot_id).delete() + + +def import_image(region, name, family, architecture, image, public, overwrite, + retain): + """Import an AMI image""" + client = boto3.client('ec2', region_name=region) + resource = boto3.resource('ec2', region_name=region) + description = '%s (%s)' % (name, architecture) + tags = [ + {'Key': 'family', 'Value': family}, + {'Key': 
'architecture', 'Value': architecture}, + ] + if overwrite: + filters = [{'Name': 'name', 'Values': [description]}] + delete_images(region=region, filters=filters, retain=0) + if retain is not None: + filters = [ + {'Name': 'tag:family', 'Values': [family]}, + {'Name': 'tag:architecture', 'Values': [architecture]}, + {'Name': 'is-public', 'Values': [str(public).lower()]}, + ] + delete_images(region=region, filters=filters, retain=retain) snapshot_id = create_snapshot(region=region, description=description, - image=image) + image=image, tags=tags) client.get_waiter('snapshot_completed').wait(SnapshotIds=[snapshot_id]) image = client.register_image(Architecture=architecture, BlockDeviceMappings=[{ @@ -72,12 +94,19 @@ def import_image(region, name, architecture, image, public, overwrite): }], EnaSupport=True, Name=description, + TagSpecifications=[{ + 'ResourceType': 'image', + 'Tags': tags, + }], RootDeviceName='/dev/sda1', SriovNetSupport='simple', VirtualizationType='hvm') image_id = image['ImageId'] client.get_waiter('image_available').wait(ImageIds=[image_id]) if public: + image_block = client.get_image_block_public_access_state() + if image_block['ImageBlockPublicAccessState'] != 'unblocked': + client.disable_image_block_public_access() resource.Image(image_id).modify_attribute(Attribute='launchPermission', OperationType='add', UserGroups=['all']) @@ -94,10 +123,14 @@ def launch_link(region, image_id): parser = argparse.ArgumentParser(description="Import AWS EC2 image (AMI)") parser.add_argument('--name', '-n', help="Image name") +parser.add_argument('--family', '-f', + help="Image family name") parser.add_argument('--public', '-p', action='store_true', help="Make image public") parser.add_argument('--overwrite', action='store_true', help="Overwrite any existing image with same name") +parser.add_argument('--retain', type=int, metavar='NUM', + help="Retain at most <NUM> old images") parser.add_argument('--region', '-r', action='append', help="AWS region(s)") 
parser.add_argument('--wiki', '-w', metavar='FILE', @@ -108,9 +141,13 @@ args = parser.parse_args() # Detect CPU architectures architectures = {image: detect_architecture(image) for image in args.image} +# Use default family name if none specified +if not args.family: + args.family = 'iPXE' + # Use default name if none specified if not args.name: - args.name = 'iPXE (%s)' % date.today().strftime('%Y-%m-%d') + args.name = '%s (%s)' % (args.family, date.today().strftime('%Y-%m-%d')) # Use all regions if none specified if not args.region: @@ -123,10 +160,12 @@ with ThreadPoolExecutor(max_workers=len(imports)) as executor: futures = {executor.submit(import_image, region=region, name=args.name, + family=args.family, architecture=architectures[image], image=image, public=args.public, - overwrite=args.overwrite): (region, image) + overwrite=args.overwrite, + retain=args.retain): (region, image) for region, image in imports} results = {futures[future]: future.result() for future in as_completed(futures)} diff --git a/contrib/cloud/gce-import b/contrib/cloud/gce-import new file mode 100755 index 000000000..e7adfee84 --- /dev/null +++ b/contrib/cloud/gce-import @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 + +import argparse +from concurrent.futures import ThreadPoolExecutor, as_completed +from datetime import date +import io +import subprocess +import tarfile +from uuid import uuid4 + +from google.cloud import compute +from google.cloud import exceptions +from google.cloud import storage + +IPXE_STORAGE_PREFIX = 'ipxe-upload-temp-' + +FEATURE_GVNIC = compute.GuestOsFeature(type_="GVNIC") +FEATURE_IDPF = compute.GuestOsFeature(type_="IDPF") +FEATURE_UEFI = compute.GuestOsFeature(type_="UEFI_COMPATIBLE") + +POLICY_PUBLIC = compute.Policy(bindings=[{ + "role": "roles/compute.imageUser", + "members": ["allAuthenticatedUsers"], +}]) + +def delete_temp_bucket(bucket): + """Remove temporary bucket""" + assert bucket.name.startswith(IPXE_STORAGE_PREFIX) + for blob in 
bucket.list_blobs(prefix=IPXE_STORAGE_PREFIX): + assert blob.name.startswith(IPXE_STORAGE_PREFIX) + blob.delete() + if not list(bucket.list_blobs()): + bucket.delete() + +def create_temp_bucket(location): + """Create temporary bucket (and remove any stale temporary buckets)""" + client = storage.Client() + for bucket in client.list_buckets(prefix=IPXE_STORAGE_PREFIX): + delete_temp_bucket(bucket) + name = '%s%s' % (IPXE_STORAGE_PREFIX, uuid4()) + return client.create_bucket(name, location=location) + +def create_tarball(image): + """Create raw disk image tarball""" + tarball = io.BytesIO() + with tarfile.open(fileobj=tarball, mode='w:gz', + format=tarfile.GNU_FORMAT) as tar: + tar.add(image, arcname='disk.raw') + tarball.seek(0) + return tarball + +def upload_blob(bucket, image): + """Upload raw disk image blob""" + blob = bucket.blob('%s%s.tar.gz' % (IPXE_STORAGE_PREFIX, uuid4())) + tarball = create_tarball(image) + blob.upload_from_file(tarball) + return blob + +def detect_uefi(image): + """Identify UEFI CPU architecture(s)""" + mdir = subprocess.run(['mdir', '-b', '-i', image, '::/EFI/BOOT'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + check=False) + mapping = { + b'BOOTX64.EFI': 'x86_64', + b'BOOTAA64.EFI': 'arm64', + } + uefi = [ + arch + for filename, arch in mapping.items() + if filename in mdir.stdout + ] + return uefi + +def image_architecture(uefi): + """Get image architecture""" + return uefi[0] if len(uefi) == 1 else None if uefi else 'x86_64' + +def image_features(uefi): + """Get image feature list""" + features = [FEATURE_GVNIC, FEATURE_IDPF] + if uefi: + features.append(FEATURE_UEFI) + return features + +def image_name(base, uefi): + """Calculate image name or family name""" + suffix = ('-uefi-%s' % uefi[0].replace('_', '-') if len(uefi) == 1 else + '-uefi-multi' if uefi else '') + return '%s%s' % (base, suffix) + +def create_image(project, basename, basefamily, overwrite, public, bucket, + image): + """Create image""" + client = 
compute.ImagesClient() + uefi = detect_uefi(image) + architecture = image_architecture(uefi) + features = image_features(uefi) + name = image_name(basename, uefi) + family = image_name(basefamily, uefi) + if overwrite: + try: + client.delete(project=project, image=name).result() + except exceptions.NotFound: + pass + blob = upload_blob(bucket, image) + disk = compute.RawDisk(source=blob.public_url) + image = compute.Image(name=name, family=family, architecture=architecture, + guest_os_features=features, raw_disk=disk) + client.insert(project=project, image_resource=image).result() + if public: + request = compute.GlobalSetPolicyRequest(policy=POLICY_PUBLIC) + client.set_iam_policy(project=project, resource=name, + global_set_policy_request_resource=request) + image = client.get(project=project, image=name) + return image + +# Parse command-line arguments +# +parser = argparse.ArgumentParser(description="Import Google Cloud image") +parser.add_argument('--name', '-n', + help="Base image name") +parser.add_argument('--family', '-f', + help="Base family name") +parser.add_argument('--public', '-p', action='store_true', + help="Make image public") +parser.add_argument('--overwrite', action='store_true', + help="Overwrite any existing image with same name") +parser.add_argument('--project', '-j', default="ipxe-images", + help="Google Cloud project") +parser.add_argument('--location', '-l', + help="Google Cloud Storage initial location") +parser.add_argument('image', nargs='+', help="iPXE disk image") +args = parser.parse_args() + +# Use default family name if none specified +if not args.family: + args.family = 'ipxe' + +# Use default name if none specified +if not args.name: + args.name = '%s-%s' % (args.family, date.today().strftime('%Y%m%d')) + +# Create temporary upload bucket +bucket = create_temp_bucket(args.location) + +# Use one thread per image to maximise parallelism +with ThreadPoolExecutor(max_workers=len(args.image)) as executor: + futures = 
{executor.submit(create_image, + project=args.project, + basename=args.name, + basefamily=args.family, + overwrite=args.overwrite, + public=args.public, + bucket=bucket, + image=image): image + for image in args.image} + results = {futures[future]: future.result() + for future in as_completed(futures)} + +# Delete temporary upload bucket +delete_temp_bucket(bucket) + +# Show created images +for image in args.image: + result = results[image] + print("%s (%s) %s" % (result.name, result.family, result.status)) diff --git a/contrib/cloud/gce-int13con b/contrib/cloud/gce-int13con new file mode 100755 index 000000000..3b909a44a --- /dev/null +++ b/contrib/cloud/gce-int13con @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 + +import argparse +import textwrap +import time +from uuid import uuid4 + +from google.cloud import compute + +IPXE_LOG_PREFIX = 'ipxe-log-temp-' +IPXE_LOG_MAGIC = 'iPXE LOG' +IPXE_LOG_END = '----- END OF iPXE LOG -----' + +def get_log_disk(instances, project, zone, name): + """Get log disk source URL""" + instance = instances.get(project=project, zone=zone, instance=name) + disk = next(x for x in instance.disks if x.boot) + return disk.source + +def delete_temp_snapshot(snapshots, project, name): + """Delete temporary snapshot""" + assert name.startswith(IPXE_LOG_PREFIX) + snapshots.delete(project=project, snapshot=name) + +def delete_temp_snapshots(snapshots, project): + """Delete all old temporary snapshots""" + filter = "name eq %s.+" % IPXE_LOG_PREFIX + request = compute.ListSnapshotsRequest(project=project, filter=filter) + for snapshot in snapshots.list(request=request): + delete_temp_snapshot(snapshots, project, snapshot.name) + +def create_temp_snapshot(snapshots, project, source): + """Create temporary snapshot""" + name = '%s%s' % (IPXE_LOG_PREFIX, uuid4()) + snapshot = compute.Snapshot(name=name, source_disk=source) + snapshots.insert(project=project, snapshot_resource=snapshot).result() + return name + +def delete_temp_instance(instances, 
project, zone, name): + """Delete log dumper temporary instance""" + assert name.startswith(IPXE_LOG_PREFIX) + instances.delete(project=project, zone=zone, instance=name) + +def delete_temp_instances(instances, project, zone): + """Delete all old log dumper temporary instances""" + filter = "name eq %s.+" % IPXE_LOG_PREFIX + request = compute.ListInstancesRequest(project=project, zone=zone, + filter=filter) + for instance in instances.list(request=request): + delete_temp_instance(instances, project, zone, instance.name) + +def create_temp_instance(instances, project, zone, family, image, machine, + snapshot): + """Create log dumper temporary instance""" + image = "projects/%s/global/images/family/%s" % (family, image) + machine_type = "zones/%s/machineTypes/%s" % (zone, machine) + logsource = "global/snapshots/%s" % snapshot + bootparams = compute.AttachedDiskInitializeParams(source_image=image) + bootdisk = compute.AttachedDisk(boot=True, auto_delete=True, + initialize_params=bootparams) + logparams = compute.AttachedDiskInitializeParams(source_snapshot=logsource) + logdisk = compute.AttachedDisk(boot=False, auto_delete=True, + initialize_params=logparams, + device_name="ipxelog") + nic = compute.NetworkInterface() + name = '%s%s' % (IPXE_LOG_PREFIX, uuid4()) + script = textwrap.dedent(f""" + #!/bin/sh + tr -d '\\000' < /dev/disk/by-id/google-ipxelog-part3 > /dev/ttyS3 + echo "{IPXE_LOG_END}" > /dev/ttyS3 + """).strip() + items = compute.Items(key="startup-script", value=script) + metadata = compute.Metadata(items=[items]) + instance = compute.Instance(name=name, machine_type=machine_type, + network_interfaces=[nic], metadata=metadata, + disks=[bootdisk, logdisk]) + instances.insert(project=project, zone=zone, + instance_resource=instance).result() + return name + +def get_log_output(instances, project, zone, name): + """Get iPXE log output""" + request = compute.GetSerialPortOutputInstanceRequest(project=project, + zone=zone, port=4, + instance=name) + while 
True: + log = instances.get_serial_port_output(request=request).contents.strip() + if log.endswith(IPXE_LOG_END): + if log.startswith(IPXE_LOG_MAGIC): + return log[len(IPXE_LOG_MAGIC):-len(IPXE_LOG_END)] + else: + return log[:-len(IPXE_LOG_END)] + time.sleep(1) + +# Parse command-line arguments +# +parser = argparse.ArgumentParser(description="Import Google Cloud image") +parser.add_argument('--project', '-j', default="ipxe-images", + help="Google Cloud project") +parser.add_argument('--zone', '-z', required=True, + help="Google Cloud zone") +parser.add_argument('--family', '-f', default="debian-cloud", + help="Helper OS image family") +parser.add_argument('--image', '-i', default="debian-12", + help="Helper OS image") +parser.add_argument('--machine', '-m', default="e2-micro", + help="Helper machine type") +parser.add_argument('instance', help="Instance name") +args = parser.parse_args() + +# Construct client objects +# +instances = compute.InstancesClient() +snapshots = compute.SnapshotsClient() + +# Clean up old temporary objects +# +delete_temp_instances(instances, project=args.project, zone=args.zone) +delete_temp_snapshots(snapshots, project=args.project) + +# Create log disk snapshot +# +logdisk = get_log_disk(instances, project=args.project, zone=args.zone, + name=args.instance) +logsnap = create_temp_snapshot(snapshots, project=args.project, source=logdisk) + +# Create log dumper instance +# +dumper = create_temp_instance(instances, project=args.project, zone=args.zone, + family=args.family, image=args.image, + machine=args.machine, snapshot=logsnap) + +# Wait for log output +# +output = get_log_output(instances, project=args.project, zone=args.zone, + name=dumper) + +# Print log output +# +print(output) + +# Clean up +# +delete_temp_instance(instances, project=args.project, zone=args.zone, + name=dumper) +delete_temp_snapshot(snapshots, project=args.project, name=logsnap) |
