upload android base code part6
This commit is contained in:
parent 421e214c7d
commit 4e516ec6ed
35396 changed files with 9188716 additions and 0 deletions
664
android/system/update_engine/scripts/brillo_update_payload
Executable file
@@ -0,0 +1,664 @@
#!/bin/bash

# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Script to generate a Brillo update for use by the update engine.
#
# usage: brillo_update_payload COMMAND [ARGS]
# The following commands are supported:
#  generate    generate an unsigned payload
#  hash        generate a payload or metadata hash
#  sign        generate a signed payload
#  properties  generate a properties file from a payload
#
# Generate command arguments:
#   --payload             generated unsigned payload output file
#   --source_image        if defined, generate a delta payload from the specified
#                         image to the target_image
#   --target_image        the target image that should be sent to clients
#   --metadata_size_file  if defined, generate a file containing the size of the payload
#                         metadata in bytes to the specified file
#
# Hash command arguments:
#   --unsigned_payload    the input unsigned payload to generate the hash from
#   --signature_size      signature sizes in bytes in the following format:
#                         "size1:size2[:...]"
#   --payload_hash_file   if defined, generate a payload hash and output to the
#                         specified file
#   --metadata_hash_file  if defined, generate a metadata hash and output to the
#                         specified file
#
# Sign command arguments:
#   --unsigned_payload         the input unsigned payload to insert the signatures
#   --payload                  the output signed payload
#   --signature_size           signature sizes in bytes in the following format:
#                              "size1:size2[:...]"
#   --payload_signature_file   the payload signature files in the following
#                              format:
#                              "payload_signature1:payload_signature2[:...]"
#   --metadata_signature_file  the metadata signature files in the following
#                              format:
#                              "metadata_signature1:metadata_signature2[:...]"
#   --metadata_size_file       if defined, generate a file containing the size of
#                              the signed payload metadata in bytes to the
#                              specified file
# Note that the number of signature sizes must match the number of payload
# signatures.
#
# Properties command arguments:
#   --payload          the input signed or unsigned payload
#   --properties_file  the output path where to write the properties, or
#                      '-' for stdout.

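# Illustrative invocations (added for clarity; file names are placeholders and
# the 256-byte signature size is only an example, matching a 2048-bit RSA
# signature -- none of these values are defaults of this script):
#
#   brillo_update_payload generate --payload full_payload.bin \
#     --target_image target_files.zip
#
#   brillo_update_payload generate --payload delta_payload.bin \
#     --source_image old_target_files.zip --target_image target_files.zip
#
#   brillo_update_payload hash --unsigned_payload full_payload.bin \
#     --signature_size 256 --payload_hash_file payload.hash \
#     --metadata_hash_file metadata.hash
#
#   brillo_update_payload sign --unsigned_payload full_payload.bin \
#     --payload signed_payload.bin --signature_size 256 \
#     --payload_signature_file payload.sig \
#     --metadata_signature_file metadata.sig
#
#   brillo_update_payload properties --payload signed_payload.bin \
#     --properties_file -
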
# Exit codes:
EX_UNSUPPORTED_DELTA=100

warn() {
  echo "brillo_update_payload: warning: $*" >&2
}

die() {
  echo "brillo_update_payload: error: $*" >&2
  exit 1
}

# Loads shflags. We first look at the default install location; then look for
# crosutils (chroot); finally check our own directory (au-generator zipfile).
load_shflags() {
  local my_dir="$(dirname "$(readlink -f "$0")")"
  local path
  for path in /usr/share/misc {/usr/lib/crosutils,"${my_dir}"}/lib/shflags; do
    if [[ -r "${path}/shflags" ]]; then
      . "${path}/shflags" || die "Could not load ${path}/shflags."
      return
    fi
  done
  die "Could not find shflags."
}

load_shflags

HELP_GENERATE="generate: Generate an unsigned update payload."
HELP_HASH="hash: Generate the hashes of the unsigned payload and metadata used \
for signing."
HELP_SIGN="sign: Insert the signatures into the unsigned payload."
HELP_PROPERTIES="properties: Extract payload properties to a file."

usage() {
  echo "Supported commands:"
  echo
  echo "${HELP_GENERATE}"
  echo "${HELP_HASH}"
  echo "${HELP_SIGN}"
  echo "${HELP_PROPERTIES}"
  echo
  echo "Use: \"$0 <command> --help\" for more options."
}

# Check that a command is specified.
if [[ $# -lt 1 ]]; then
  echo "Please specify a command [generate|hash|sign|properties]"
  exit 1
fi

# Parse command.
COMMAND="${1:-}"
shift

case "${COMMAND}" in
  generate)
    FLAGS_HELP="${HELP_GENERATE}"
    ;;

  hash)
    FLAGS_HELP="${HELP_HASH}"
    ;;

  sign)
    FLAGS_HELP="${HELP_SIGN}"
    ;;

  properties)
    FLAGS_HELP="${HELP_PROPERTIES}"
    ;;
  *)
    echo "Unrecognized command: \"${COMMAND}\"" >&2
    usage >&2
    exit 1
    ;;
esac

# Flags
FLAGS_HELP="Usage: $0 ${COMMAND} [flags]
${FLAGS_HELP}"

if [[ "${COMMAND}" == "generate" ]]; then
  DEFINE_string payload "" \
    "Path to output the generated unsigned payload file."
  DEFINE_string target_image "" \
    "Path to the target image that should be sent to clients."
  DEFINE_string source_image "" \
    "Optional: Path to a source image. If specified, this makes a delta update."
  DEFINE_string metadata_size_file "" \
    "Optional: Path to output metadata size."
  DEFINE_string max_timestamp "" \
    "Optional: The maximum unix timestamp of the OS allowed to apply this \
payload; it should be set to a number higher than the build timestamp of the \
system running on the device. Defaults to 0 if not specified."
fi
if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
  DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
  DEFINE_string signature_size "" \
    "Signature sizes in bytes in the following format: size1:size2[:...]"
fi
if [[ "${COMMAND}" == "hash" ]]; then
  DEFINE_string metadata_hash_file "" \
    "Optional: Path to output metadata hash file."
  DEFINE_string payload_hash_file "" \
    "Optional: Path to output payload hash file."
fi
if [[ "${COMMAND}" == "sign" ]]; then
  DEFINE_string payload "" \
    "Path to output the generated signed payload file."
  DEFINE_string metadata_signature_file "" \
    "The metadata signatures in the following format: \
metadata_signature1:metadata_signature2[:...]"
  DEFINE_string payload_signature_file "" \
    "The payload signatures in the following format: \
payload_signature1:payload_signature2[:...]"
  DEFINE_string metadata_size_file "" \
    "Optional: Path to output metadata size."
fi
if [[ "${COMMAND}" == "properties" ]]; then
  DEFINE_string payload "" \
    "Path to the input signed or unsigned payload file."
  DEFINE_string properties_file "-" \
    "Path to output the extracted property files. If '-' is passed stdout will \
be used."
fi

DEFINE_string work_dir "${TMPDIR:-/tmp}" "Where to dump temporary files."

# Parse command line flag arguments
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
set -e

# Override TMPDIR with the passed work_dir flag, which itself defaults to
# ${TMPDIR}.
TMPDIR="${FLAGS_work_dir}"
export TMPDIR

# Associative arrays from partition name to file in the source and target
# images. The size of the updated area must be the size of the file.
declare -A SRC_PARTITIONS
declare -A DST_PARTITIONS

# Associative arrays for the .map files associated with each src/dst partition
# file in SRC_PARTITIONS and DST_PARTITIONS.
declare -A SRC_PARTITIONS_MAP
declare -A DST_PARTITIONS_MAP

# List of partition names in order.
declare -a PARTITIONS_ORDER

# A list of temporary files to remove during cleanup.
CLEANUP_FILES=()

# Global options to force the version of the payload.
FORCE_MAJOR_VERSION=""
FORCE_MINOR_VERSION=""

# Path to the postinstall config file in the target image, if it exists.
POSTINSTALL_CONFIG_FILE=""

# The fingerprint of zlib in the source image.
ZLIB_FINGERPRINT=""

# read_option_uint <file.txt> <option_key> [default_value]
#
# Reads the unsigned integer value associated with |option_key| in a key=value
# file |file.txt|. Prints the read value if found and valid, otherwise prints
# the |default_value|.
read_option_uint() {
  local file_txt="$1"
  local option_key="$2"
  local default_value="${3:-}"
  local value
  if value=$(look "${option_key}=" "${file_txt}" | tail -n 1); then
    if value=$(echo "${value}" | cut -f 2- -d "=" | grep -E "^[0-9]+$"); then
      echo "${value}"
      return
    fi
  fi
  echo "${default_value}"
}

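# For example (illustrative, mirroring how this helper is invoked later in
# this script when reading META/update_engine_config.txt):
#   FORCE_MINOR_VERSION=$(read_option_uint "${ue_config}" \
#     "PAYLOAD_MINOR_VERSION" 2)
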
# truncate_file <file_path> <file_size>
#
# Truncate the given |file_path| to |file_size| using perl.
# The truncate binary might not be available.
truncate_file() {
  local file_path="$1"
  local file_size="$2"
  perl -e "open(FILE, \"+<\", \$ARGV[0]); \
           truncate(FILE, ${file_size}); \
           close(FILE);" "${file_path}"
}

# Create a temporary file in the work_dir with an optional pattern name.
# Prints the name of the newly created file.
create_tempfile() {
  local pattern="${1:-tempfile.XXXXXX}"
  mktemp --tmpdir="${FLAGS_work_dir}" "${pattern}"
}

cleanup() {
  local err=""
  rm -f "${CLEANUP_FILES[@]}" || err=1

  # If we are cleaning up after an error, or if we got an error during
  # cleanup (even if we eventually succeeded) return a non-zero exit
  # code. This triggers additional logging in most environments that call
  # this script.
  if [[ -n "${err}" ]]; then
    die "Cleanup encountered an error."
  fi
}

cleanup_on_error() {
  trap - INT TERM ERR EXIT
  cleanup
  die "Cleanup succeeded after an earlier error."
}

cleanup_on_exit() {
  trap - INT TERM ERR EXIT
  cleanup
}

trap cleanup_on_error INT TERM ERR
trap cleanup_on_exit EXIT

# extract_image <image> <partitions_array> [partitions_order]
#
# Detect the format of the |image| file and extract its updatable partitions
# into new temporary files. Add the list of partition names and their files to
# the associative array passed in |partitions_array|. If |partitions_order| is
# passed, set it to the list of partition names in order.
extract_image() {
  local image="$1"

  # Brillo images are zip files. We detect the 4-byte magic header of the zip
  # file.
  local magic=$(head --bytes=4 "${image}" | hexdump -e '1/1 "%.2x"')
  if [[ "${magic}" == "504b0304" ]]; then
    echo "Detected .zip file, extracting Brillo image."
    extract_image_brillo "$@"
    return
  fi

  # Chrome OS images are GPT partitioned disks. We should have the cgpt binary
  # bundled here and we will use it to extract the partitions, so the GPT
  # headers must be valid.
  if cgpt show -q -n "${image}" >/dev/null; then
    echo "Detected GPT image, extracting Chrome OS image."
    extract_image_cros "$@"
    return
  fi

  die "Couldn't detect the image format of ${image}"
}

# extract_image_cros <image.bin> <partitions_array> [partitions_order]
#
# Extract Chromium OS recovery images into new temporary files.
extract_image_cros() {
  local image="$1"
  local partitions_array="$2"
  local partitions_order="${3:-}"

  local kernel root
  kernel=$(create_tempfile "kernel.bin.XXXXXX")
  CLEANUP_FILES+=("${kernel}")
  root=$(create_tempfile "root.bin.XXXXXX")
  CLEANUP_FILES+=("${root}")

  cros_generate_update_payload --extract \
    --image "${image}" \
    --kern_path "${kernel}" --root_path "${root}" \
    --work_dir "${FLAGS_work_dir}" --outside_chroot

  # Chrome OS uses major_version 1 payloads for all versions, even if the
  # updater supports a newer major version.
  FORCE_MAJOR_VERSION="1"

  if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
    # Copy from zlib_fingerprint in source image to stdout.
    ZLIB_FINGERPRINT=$(e2cp "${root}":/etc/zlib_fingerprint -)
  fi

  # When generating legacy Chrome OS images, we need to use "boot" and "system"
  # for the partition names to be compatible with updating Brillo devices with
  # Chrome OS images.
  eval ${partitions_array}[boot]=\""${kernel}"\"
  eval ${partitions_array}[system]=\""${root}"\"

  if [[ -n "${partitions_order}" ]]; then
    eval "${partitions_order}=( \"system\" \"boot\" )"
  fi

  local part varname
  for part in boot system; do
    varname="${partitions_array}[${part}]"
    printf "md5sum of %s: " "${varname}"
    md5sum "${!varname}"
  done
}

# extract_image_brillo <target_files.zip> <partitions_array> [partitions_order]
#
# Extract the A/B updated partitions from a Brillo target_files zip file into
# new temporary files.
extract_image_brillo() {
  local image="$1"
  local partitions_array="$2"
  local partitions_order="${3:-}"

  local partitions=( "boot" "system" )
  local ab_partitions_list
  ab_partitions_list=$(create_tempfile "ab_partitions_list.XXXXXX")
  CLEANUP_FILES+=("${ab_partitions_list}")
  if unzip -p "${image}" "META/ab_partitions.txt" >"${ab_partitions_list}"; then
    if grep -v -E '^[a-zA-Z0-9_-]*$' "${ab_partitions_list}" >&2; then
      die "Invalid partition names found in the partition list."
    fi
    partitions=($(cat "${ab_partitions_list}"))
    if [[ ${#partitions[@]} -eq 0 ]]; then
      die "The list of partitions is empty. Can't generate a payload."
    fi
  else
    warn "No ab_partitions.txt found. Using default."
  fi
  echo "List of A/B partitions: ${partitions[@]}"

  if [[ -n "${partitions_order}" ]]; then
    eval "${partitions_order}=(${partitions[@]})"
  fi

  # All Brillo updaters support major version 2.
  FORCE_MAJOR_VERSION="2"

  if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
    # Source image
    local ue_config=$(create_tempfile "ue_config.XXXXXX")
    CLEANUP_FILES+=("${ue_config}")
    if ! unzip -p "${image}" "META/update_engine_config.txt" \
        >"${ue_config}"; then
      warn "No update_engine_config.txt found. Assuming pre-release image, \
using payload minor version 2"
    fi
    # For delta payloads, we use the major and minor version supported by the
    # old updater.
    FORCE_MINOR_VERSION=$(read_option_uint "${ue_config}" \
      "PAYLOAD_MINOR_VERSION" 2)
    FORCE_MAJOR_VERSION=$(read_option_uint "${ue_config}" \
      "PAYLOAD_MAJOR_VERSION" 2)

    # Brillo support for deltas started with minor version 3.
    if [[ "${FORCE_MINOR_VERSION}" -le 2 ]]; then
      warn "No delta support from minor version ${FORCE_MINOR_VERSION}. \
Disabling deltas for this source version."
      exit ${EX_UNSUPPORTED_DELTA}
    fi

    if [[ "${FORCE_MINOR_VERSION}" -ge 4 ]]; then
      ZLIB_FINGERPRINT=$(unzip -p "${image}" "META/zlib_fingerprint.txt")
    fi
  else
    # Target image
    local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
    CLEANUP_FILES+=("${postinstall_config}")
    if unzip -p "${image}" "META/postinstall_config.txt" \
        >"${postinstall_config}"; then
      POSTINSTALL_CONFIG_FILE="${postinstall_config}"
    fi
  fi

  local part part_file temp_raw filesize
  for part in "${partitions[@]}"; do
    part_file=$(create_tempfile "${part}.img.XXXXXX")
    CLEANUP_FILES+=("${part_file}")
    unzip -p "${image}" "IMAGES/${part}.img" >"${part_file}"

    # If the partition is stored as an Android sparse image file, we need to
    # convert it to a raw image for the update.
    local magic=$(head --bytes=4 "${part_file}" | hexdump -e '1/1 "%.2x"')
    if [[ "${magic}" == "3aff26ed" ]]; then
      temp_raw=$(create_tempfile "${part}.raw.XXXXXX")
      CLEANUP_FILES+=("${temp_raw}")
      echo "Converting Android sparse image ${part}.img to RAW."
      simg2img "${part_file}" "${temp_raw}"
      # At this point, we can drop the contents of the old part_file file, but
      # we can't delete the file because it will be deleted in cleanup.
      true >"${part_file}"
      part_file="${temp_raw}"
    fi

    # Extract the .map file (if one is available).
    part_map_file=$(create_tempfile "${part}.map.XXXXXX")
    CLEANUP_FILES+=("${part_map_file}")
    unzip -p "${image}" "IMAGES/${part}.map" >"${part_map_file}" || \
      part_map_file=""

    # delta_generator only supports images whose size is a multiple of 4 KiB.
    # For target images we pad the data with zeros if needed, but for source
    # images we truncate down the data since the last block of the old image
    # could be padded on disk with unknown data.
    filesize=$(stat -c%s "${part_file}")
    if [[ $(( filesize % 4096 )) -ne 0 ]]; then
      if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
        echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
        : $(( filesize = filesize & -4096 ))
        if [[ ${filesize} == 0 ]]; then
          echo "Source partition ${part}.img is empty after rounding down," \
            "skipping."
          continue
        fi
      else
        echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
        : $(( filesize = (filesize + 4095) & -4096 ))
      fi
      truncate_file "${part_file}" "${filesize}"
    fi

    eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
    eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
    echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
  done
}

validate_generate() {
  [[ -n "${FLAGS_payload}" ]] ||
    die "You must specify an output filename with --payload FILENAME"

  [[ -n "${FLAGS_target_image}" ]] ||
    die "You must specify a target image with --target_image FILENAME"
}

cmd_generate() {
  local payload_type="delta"
  if [[ -z "${FLAGS_source_image}" ]]; then
    payload_type="full"
  fi

  echo "Extracting images for ${payload_type} update."

  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
  if [[ "${payload_type}" == "delta" ]]; then
    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
  fi

  echo "Generating ${payload_type} update."
  # Common payload args:
  GENERATOR_ARGS=( -out_file="${FLAGS_payload}" )

  local part old_partitions="" new_partitions="" partition_names=""
  local old_mapfiles="" new_mapfiles=""
  for part in "${PARTITIONS_ORDER[@]}"; do
    if [[ -n "${partition_names}" ]]; then
      partition_names+=":"
      new_partitions+=":"
      old_partitions+=":"
      new_mapfiles+=":"
      old_mapfiles+=":"
    fi
    partition_names+="${part}"
    new_partitions+="${DST_PARTITIONS[${part}]}"
    old_partitions+="${SRC_PARTITIONS[${part}]:-}"
    new_mapfiles+="${DST_PARTITIONS_MAP[${part}]:-}"
    old_mapfiles+="${SRC_PARTITIONS_MAP[${part}]:-}"
  done

  # Target image args:
  GENERATOR_ARGS+=(
    -partition_names="${partition_names}"
    -new_partitions="${new_partitions}"
    -new_mapfiles="${new_mapfiles}"
  )

  if [[ "${payload_type}" == "delta" ]]; then
    # Source image args:
    GENERATOR_ARGS+=(
      -old_partitions="${old_partitions}"
      -old_mapfiles="${old_mapfiles}"
    )
    if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
      GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
    fi
    if [[ -n "${ZLIB_FINGERPRINT}" ]]; then
      GENERATOR_ARGS+=( --zlib_fingerprint="${ZLIB_FINGERPRINT}" )
    fi
  fi

  if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
    GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
  fi

  if [[ -n "${FLAGS_metadata_size_file}" ]]; then
    GENERATOR_ARGS+=( --out_metadata_size_file="${FLAGS_metadata_size_file}" )
  fi

  if [[ -n "${FLAGS_max_timestamp}" ]]; then
    GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
  fi

  if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
    GENERATOR_ARGS+=(
      --new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"
    )
  fi

  echo "Running delta_generator with args: ${GENERATOR_ARGS[@]}"
  "${GENERATOR}" "${GENERATOR_ARGS[@]}"

  echo "Done generating ${payload_type} update."
}

validate_hash() {
  [[ -n "${FLAGS_signature_size}" ]] ||
    die "You must specify signature size with --signature_size SIZES"

  [[ -n "${FLAGS_unsigned_payload}" ]] ||
    die "You must specify the input unsigned payload with \
--unsigned_payload FILENAME"

  [[ -n "${FLAGS_payload_hash_file}" ]] ||
    die "You must specify --payload_hash_file FILENAME"

  [[ -n "${FLAGS_metadata_hash_file}" ]] ||
    die "You must specify --metadata_hash_file FILENAME"
}

cmd_hash() {
  "${GENERATOR}" \
    -in_file="${FLAGS_unsigned_payload}" \
    -signature_size="${FLAGS_signature_size}" \
    -out_hash_file="${FLAGS_payload_hash_file}" \
    -out_metadata_hash_file="${FLAGS_metadata_hash_file}"

  echo "Done generating hash."
}

validate_sign() {
  [[ -n "${FLAGS_signature_size}" ]] ||
    die "You must specify signature size with --signature_size SIZES"

  [[ -n "${FLAGS_unsigned_payload}" ]] ||
    die "You must specify the input unsigned payload with \
--unsigned_payload FILENAME"

  [[ -n "${FLAGS_payload}" ]] ||
    die "You must specify the output signed payload with --payload FILENAME"

  [[ -n "${FLAGS_payload_signature_file}" ]] ||
    die "You must specify the payload signature file with \
--payload_signature_file SIGNATURES"

  [[ -n "${FLAGS_metadata_signature_file}" ]] ||
    die "You must specify the metadata signature file with \
--metadata_signature_file SIGNATURES"
}

cmd_sign() {
  GENERATOR_ARGS=(
    -in_file="${FLAGS_unsigned_payload}"
    -signature_size="${FLAGS_signature_size}"
    -signature_file="${FLAGS_payload_signature_file}"
    -metadata_signature_file="${FLAGS_metadata_signature_file}"
    -out_file="${FLAGS_payload}"
  )

  if [[ -n "${FLAGS_metadata_size_file}" ]]; then
    GENERATOR_ARGS+=( --out_metadata_size_file="${FLAGS_metadata_size_file}" )
  fi

  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
  echo "Done signing payload."
}

validate_properties() {
  [[ -n "${FLAGS_payload}" ]] ||
    die "You must specify the payload file with --payload FILENAME"

  [[ -n "${FLAGS_properties_file}" ]] ||
    die "You must specify a non-empty --properties_file FILENAME"
}

cmd_properties() {
  "${GENERATOR}" \
    -in_file="${FLAGS_payload}" \
    -properties_file="${FLAGS_properties_file}"
}

# Sanity check that the real generator exists:
GENERATOR="$(which delta_generator || true)"
[[ -x "${GENERATOR}" ]] || die "can't find delta_generator"

case "$COMMAND" in
  generate) validate_generate
            cmd_generate
            ;;
  hash) validate_hash
        cmd_hash
        ;;
  sign) validate_sign
        cmd_sign
        ;;
  properties) validate_properties
              cmd_properties
              ;;
esac