Commit eff6542
cmd-sign: add support for signing OCI images
This adds a new `cosa sign --oci` command to sign OCI container images. This is part of the effort to move FCOS to a container-native build flow, where we now produce non-encapsulated container images.

The new command works by sending a request to Robosignatory to sign the image manifest digest. Robosignatory returns a detached signature, which we then merge with the original payload to create a cleartext signed message that can be understood by containers/image.

This is a short-term solution until we can move to Sigstore. Part of coreos/fedora-coreos-tracker#1969.
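For illustration, here is a minimal standalone sketch of the merge step described above: build the containers-signature(5) JSON payload, armor the binary detached signature returned by the signer, and splice the two into a cleartext-signed message with the `\x00simple-signing\n` framing that containers/image recognizes. The image reference, digest, and GPG home below are hypothetical; the actual implementation is `robosign_oci()` in the diff that follows.

    #!/usr/bin/env python3
    # Hypothetical sketch of the payload + merge steps; names and paths
    # are illustrative, not part of the commit itself.
    import json
    import subprocess
    import time

    GPG_HOME = '/tmp/cosa-sign-example'  # assumed scratch GPG home

    # The claim being signed, per containers-signature(5).
    payload = json.dumps({
        "critical": {
            "identity": {"docker-reference": "quay.io/fedora/fedora-coreos:stable"},
            "image": {"docker-manifest-digest": "sha256:" + "0" * 64},
            "type": "atomic container signature",
        },
        "optional": {"creator": "coreos-assembler", "timestamp": int(time.time())},
    })

    def merge_detached_sig(payload: str, detached_sig: bytes) -> bytes:
        """Merge a binary detached signature with its payload into the
        single signature blob format containers/image expects."""
        # Armor the binary signature so it can be embedded in text.
        armored = subprocess.check_output(
            ['gpg', '--homedir', GPG_HOME, '--enarmor'], input=detached_sig)
        armored = armored.decode().replace('ARMORED FILE', 'SIGNATURE')

        # Splice payload and signature into a cleartext-signed message.
        # Robosignatory (Sigul) signs with SHA256, hence the fixed Hash: line.
        msg = ('-----BEGIN PGP SIGNED MESSAGE-----\n'
               'Hash: SHA256\n\n'
               + payload + '\n'
               + armored)

        # Prefix with the marker that tells c/image this is a
        # simple-signing blob.
        return b'\x00simple-signing\n' + msg.encode()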
File changed: src/cmd-sign (+207, −0)
@@ -7,11 +7,13 @@
 
 import argparse
 import gi
+import json
 import os
 import shutil
 import subprocess
 import sys
 import tempfile
+import time
 
 import boto3
 
@@ -61,6 +63,8 @@ def parse_args():
     group = robosig.add_mutually_exclusive_group(required=True)
     group.add_argument("--ostree", help="sign commit", action='store_true')
     group.add_argument("--images", help="sign images", action='store_true')
+    group.add_argument("--oci", metavar='KEY',
+                       help="sign OCI image in meta.json key (e.g. 'base-oscontainer')")
     robosig.add_argument("--extra-fedmsg-keys", action='append',
                          metavar='KEY=VAL', default=[],
                          help="extra keys to inject into messages")
@@ -71,6 +75,7 @@ def parse_args():
     robosig.add_argument("--gpgkeypath", help="path to directory containing "
                          "public keys to use for signature verification",
                          default="/etc/pki/rpm-gpg")
+    robosig.add_argument("--s3-sigstore", help="bucket and prefix to S3 sigstore")
     robosig.add_argument("--verify-only", action='store_true',
                          help="verify only that the sigs are valid and make public")
     robosig.set_defaults(func=cmd_robosignatory)
@@ -106,6 +111,10 @@ def cmd_robosignatory(args):
         if args.s3 is None:
             raise Exception("Missing --s3 for --ostree")
         robosign_ostree(args, s3, build, gpgkey)
+    elif args.oci:
+        if args.verify_only:
+            raise Exception("Cannot use --verify-only with --oci")
+        robosign_oci(args, s3, build, gpgkey)
     else:
         assert args.images
         if args.s3 is None:
@@ -296,5 +305,203 @@ def validate_response(response):
     assert response['status'].lower() == 'success', str(response)
 
 
+def robosign_oci(args, s3, build, gpgkey):
+    builds = Builds()
+
+    # Map of {repo:tag -> digest}. "Identity" is the term used in
+    # containers-signature(5) to refer to how users will actually be pulling
+    # the image (which is usually by tag).
+    identities = {}
+    for arch in builds.get_build_arches(args.build):
+        build = builds.get_build_meta(args.build, arch)
+        image = build.get(args.oci)
+        if not image:
+            print(f"skipping signing for missing {args.oci} image on {arch}")
+            continue
+
+        # we sign for every tag we've pushed as
+        for tag in image['tags']:
+            identity = f"{image['image']}:{tag}"
+            identities.setdefault(identity, []).append(image['digest'])
+
+    # add the git commit of ourselves in the signatures for bookkeeping
+    creator = 'coreos-assembler'
+    try:
+        with open('/cosa/coreos-assembler-git.json') as f:
+            cosa_git = json.load(f)
+        creator += ' g' + cosa_git['git']['commit'][:12]
+    except FileNotFoundError:
+        pass
+
+    with tempfile.TemporaryDirectory(prefix="cosa-sign-", dir="tmp") as d:
+        # first, create the payloads to be signed
+        files_to_upload = []
+        for identity, digests in identities.items():
+            for digest in digests:
+                # see https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/docs/containers-signature.5.md?plain=1#L110
+                data = {
+                    "critical": {
+                        "identity": {
+                            "docker-reference": identity
+                        },
+                        "image": {
+                            "docker-manifest-digest": digest
+                        },
+                        "type": "atomic container signature"
+                    },
+                    "optional": {
+                        "creator": creator,
+                        "timestamp": int(time.time())
+                    }
+                }
+
+                # Make the filename unique per payload. This is just a
+                # temporary name. The final naming and structure will be
+                # different.
+                filename = str(abs(hash(str(data))))
+                path = os.path.join(d, filename)
+                with open(path, 'w') as f:
+                    # NB: it's important for this to be just one line so that
+                    # we don't have to correct for how gpg canonicalizes the
+                    # input payload differently when it's cleartext signed
+                    # vs detached
+                    json.dump(data, f)
+                files_to_upload.append({'path': path, 'filename': filename,
+                                        'identity': identity, 'digest': digest})
+
+        # Upload them to S3. We upload to `staging/` first, and then will move
+        # them to their final location once they're verified.
+        sigstore_bucket, sigstore_prefix = get_bucket_and_prefix(args.s3_sigstore)
+        sigstore_prefix = os.path.join(sigstore_prefix, 'staging')
+
+        # First, empty out staging/ so we don't accumulate cruft over time
+        # https://stackoverflow.com/a/59026702
+        # Note this assumes we don't run in parallel on the same sigstore
+        # target, which is the case for us since only one release job can run
+        # at a time per-stream and the S3 target location is stream-based.
+        staging_objects = s3.list_objects_v2(Bucket=sigstore_bucket, Prefix=sigstore_prefix)
+        objects_to_delete = [{'Key': obj['Key']} for obj in staging_objects.get('Contents', [])]
+        if len(objects_to_delete) > 0:
+            print(f'Deleting {len(objects_to_delete)} stale files')
+            s3.delete_objects(Bucket=sigstore_bucket, Delete={'Objects': objects_to_delete})
+
+        # now, upload the ones we want
+        artifacts = []
+        for f in files_to_upload:
+            s3_key = os.path.join(sigstore_prefix, f['filename'])
+            print(f"Uploading s3://{sigstore_bucket}/{s3_key}")
+            s3.upload_file(f['path'], sigstore_bucket, s3_key)
+            artifacts.append({
+                'file': f"s3://{sigstore_bucket}/{s3_key}",
+                'checksum': f"sha256:{sha256sum_file(f['path'])}"
+            })
+
+        response = send_request_and_wait_for_response(
+            request_type='artifacts-sign',
+            config=args.fedmsg_conf,
+            request_timeout=ROBOSIGNATORY_REQUEST_TIMEOUT_SEC,
+            priority=ROBOSIGNATORY_MESSAGE_PRIORITY,
+            environment=fedenv,
+            body={
+                'build_id': args.build,
+                # We pass a 'basearch' here but we're actually bulk signing
+                # for all arches in one shot. But we can't omit it because
+                # Robosignatory logs it. It's not used otherwise.
+                'basearch': args.arch,
+                'artifacts': artifacts,
+                **args.extra_keys
+            }
+        )
+
+        validate_response(response)
+
+        # download sigs, verify, finalize, and upload to final location
+        def gpg(*args):
+            subprocess.check_call(['gpg', '--homedir', d, *args])
+
+        gpg('--quiet', '--import', gpgkey)
+
+        sig_counter = {}
+        # peel off the '/staging' bit
+        final_sigstore_prefix = os.path.dirname(sigstore_prefix)
+        for f in files_to_upload:
+            stg_s3_key = os.path.join(sigstore_prefix, f['filename'])
+            stg_sig_s3_key = stg_s3_key + '.sig'
+
+            tmp_sig_path = os.path.join(d, f['filename'] + '.sig')
+            print(f"Downloading s3://{sigstore_bucket}/{stg_sig_s3_key}")
+            s3.download_file(sigstore_bucket, stg_sig_s3_key, tmp_sig_path)
+            s3.delete_object(Bucket=sigstore_bucket, Key=stg_s3_key)
+            s3.delete_object(Bucket=sigstore_bucket, Key=stg_sig_s3_key)
+
+            local_artifact = f['path']
+
+            print(f"Verifying detached signature for {local_artifact}")
+            try:
+                gpg('--verify', tmp_sig_path, local_artifact)
+            except subprocess.CalledProcessError as e:
+                # allow unknown signatures in stg
+                if fedenv != 'stg':
+                    raise e
+
+            # This is where the magic happens: we merge the detached signature
+            # with the original payload to create a cleartext signed message
+            # so it's a single artifact like c/image expects.
+            # See also: https://github.com/containers/container-libs/pull/307
+            with open(tmp_sig_path, 'rb') as fp:
+                armored_sig = subprocess.check_output(['gpg', '--homedir', d, '--enarmor'], input=fp.read())
+            armored_sig = str(armored_sig, encoding='utf-8')
+
+            # not strictly required, but looks more like a usual cleartext signature
+            armored_sig = armored_sig.replace('ARMORED FILE', 'SIGNATURE')
+
+            with open(local_artifact, 'r') as fp:
+                original_content = fp.read()
+
+            signed_message = "-----BEGIN PGP SIGNED MESSAGE-----\n"
+            # Right now, we assume Robosignatory (really Sigul) uses SHA256;
+            # in theory we could parse the signature and get the digest algo
+            # that was used, but it seems unlikely that Sigul will change this
+            # before it's sunset, at which point we would've already moved on
+            # from this code. If it does, here's one way to do it: call `gpg
+            # --list-packets` and look for 'digest algo N' and convert N to the
+            # right string based on
+            # https://github.com/gpg/gnupg/blob/6771ed4c13226ea8f410d022fa83888930070f70/common/openpgpdefs.h#L185
+            signed_message += "Hash: SHA256\n\n"
+            signed_message += original_content + "\n"
+            signed_message += armored_sig
+
+            # just overwrite the original payload; we don't need it anymore
+            with open(f['path'], 'w') as fp:
+                fp.write(signed_message)
+
+            print(f"Verifying cleartext signature {f['path']}")
+            try:
+                gpg('--verify', f['path'])
+            except subprocess.CalledProcessError as e:
+                # allow unknown signatures in stg
+                if fedenv != 'stg':
+                    raise e
+
+            # tell c/image that it's a valid signature
+            # https://github.com/containers/container-libs/blob/58b82c921fde7dafbc0da766f1037602cfd5553c/image/internal/signature/signature.go#L66
+            signed_message = b'\x00simple-signing\n' + bytes(signed_message, encoding='utf-8')
+            with open(f['path'], 'wb') as fp:
+                fp.write(signed_message)
+
+            image_repo = f['identity']
+            # e.g. "quay.io/fedora/fedora-coreos:stable" -> "fedora/fedora-coreos"
+            _, image_repo = image_repo.split('/', 1)
+            image_repo, _ = image_repo.split(':')
+
+            sig_prefix = f"{image_repo}@{f['digest'].replace(':', '=')}"
+            sig_number = sig_counter.get(sig_prefix, 0) + 1
+            sig_counter[sig_prefix] = sig_number
+
+            # upload to final location and make public
+            final_s3_key = os.path.join(final_sigstore_prefix, sig_prefix, f"signature-{sig_number}")
+            print(f"Uploading {f['path']} to s3://{sigstore_bucket}/{final_s3_key}")
+            s3.upload_file(f['path'], sigstore_bucket, final_s3_key, ExtraArgs={'ACL': 'public-read'})
+
+
 if __name__ == '__main__':
     sys.exit(main())
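To make the consumer side concrete, here is a hypothetical verification sketch against the sigstore layout the code above writes (`<repo>@<algo>=<digest>/signature-<n>`). The base URL, repository, and digest are made up, and it assumes the signing key is already in the local keyring; containers/image does the equivalent of this internally when a registries.d entry points it at the sigstore.

    #!/usr/bin/env python3
    # Hypothetical consumer-side sketch: fetch a signature from the sigstore
    # and verify it with gpg. URL, repo, and digest are illustrative.
    import subprocess
    import tempfile
    import urllib.request

    SIGSTORE_BASE = 'https://example-bucket.s3.amazonaws.com/prefix'  # assumed
    REPO = 'fedora/fedora-coreos'                                     # assumed
    DIGEST = 'sha256:' + '0' * 64                                     # assumed

    # Same layout the signer uses: <repo>@<algo>=<digest>/signature-<n>
    url = f"{SIGSTORE_BASE}/{REPO}@{DIGEST.replace(':', '=')}/signature-1"
    blob = urllib.request.urlopen(url).read()

    # Strip the b'\x00simple-signing\n' framing to recover the cleartext
    # signed message, then let gpg verify it.
    prefix = b'\x00simple-signing\n'
    assert blob.startswith(prefix)
    cleartext = blob[len(prefix):]

    with tempfile.NamedTemporaryFile(suffix='.asc') as f:
        f.write(cleartext)
        f.flush()
        # assumes the signing public key was already imported
        subprocess.check_call(['gpg', '--verify', f.name])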
