Changeset View
Changeset View
Standalone View
Standalone View
contrib/teamcity/build-configurations.py
#!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||
# Copyright (c) 2020 The Bitcoin developers | # Copyright (c) 2020 The Bitcoin developers | ||||
# Distributed under the MIT software license, see the accompanying | # Distributed under the MIT software license, see the accompanying | ||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. | # file COPYING or http://www.opensource.org/licenses/mit-license.php. | ||||
import argparse | import argparse | ||||
import asyncio | import asyncio | ||||
import os | import os | ||||
import random | |||||
import shutil | import shutil | ||||
import stat | import stat | ||||
import subprocess | import subprocess | ||||
import sys | import sys | ||||
from pathlib import Path, PurePath | from pathlib import Path, PurePath | ||||
from string import Template | from string import Template | ||||
import yaml | import yaml | ||||
▲ Show 20 Lines • Show All 125 Lines • ▼ Show 20 Lines | def create_script_file(self, dest, content): | ||||
f.write( | f.write( | ||||
template.safe_substitute( | template.safe_substitute( | ||||
**self.environment_variables, | **self.environment_variables, | ||||
SCRIPT_CONTENT=content, | SCRIPT_CONTENT=content, | ||||
) | ) | ||||
) | ) | ||||
dest.chmod(dest.stat().st_mode | stat.S_IEXEC) | dest.chmod(dest.stat().st_mode | stat.S_IEXEC) | ||||
def create_build_steps(self, artifact_dir): | def create_build_steps(self, artifact_dir, preview_url, ip_address): | ||||
# There are 3 possibilities to define the build steps: | # There are 3 possibilities to define the build steps: | ||||
# - By manually defining a script to run. | # - By manually defining a script to run. | ||||
# - By specifying a docker configuration to build | # - By specifying a docker configuration to build | ||||
# - By defining the configuration options and a list of target groups to | # - By defining the configuration options and a list of target groups to | ||||
# run. The configuration step should be run once then all the targets | # run. The configuration step should be run once then all the targets | ||||
# groups. Each target group can contain 1 or more targets which | # groups. Each target group can contain 1 or more targets which | ||||
# should be run parallel. | # should be run parallel. | ||||
script = self.config.get("script", None) | script = self.config.get("script", None) | ||||
Show All 36 Lines | def create_build_steps(self, artifact_dir, preview_url, ip_address): | ||||
# Docker build | # Docker build | ||||
self.build_steps.append( | self.build_steps.append( | ||||
{ | { | ||||
"bin": "docker", | "bin": "docker", | ||||
"args": ["build"] + dockerfile_args + ["-t", tag_name, str(context)], | "args": ["build"] + dockerfile_args + ["-t", tag_name, str(context)], | ||||
} | } | ||||
) | ) | ||||
port = docker_config.get("port", None) | inner_port = docker_config.get("port", None) | ||||
port_args = ["-p", f"{port}:{port}"] if port else [] | outer_port = random.randrange(41000, 42000) | ||||
port_args = ["-p", f"{outer_port}:{inner_port}"] if inner_port else [] | |||||
# Docker run. This uses a timeout value to stop the container after | # Docker run. This uses a timeout value to stop the container after | ||||
# some time. The stop signal is defined to sigterm so the app has a | # some time. The stop signal is defined to sigterm so the app has a | ||||
# chance of gracefully handle the stop request, and defaults to a | # chance of gracefully handle the stop request, and defaults to a | ||||
# less subtle SIGKILL if it didn't abort after a minute. | # less subtle SIGKILL if it didn't abort after a minute. | ||||
self.build_steps.append( | self.build_steps.append( | ||||
{ | { | ||||
"bin": "docker", | "bin": "docker", | ||||
"args": ["run", "--rm", "-d", "--name", tag_name, "--stop-signal", "SIGTERM", "--stop-timeout", "60"] + port_args + [tag_name], | "args": ["run", "--rm", "-d", "--name", tag_name, "--stop-signal", "SIGTERM", "--stop-timeout", "60"] + port_args + [tag_name], | ||||
} | } | ||||
) | ) | ||||
timeout_minutes = docker_config.get("timeout_minutes", 60) | timeout_minutes = docker_config.get("timeout_minutes", 60) | ||||
# Write the address to stdout and to the preview_url log file | |||||
preview_msg = f"Preview is available at http://{ip_address}:{outer_port} for the next {timeout_minutes} minutes." | |||||
with open(preview_url, 'w', encoding='utf-8') as f: | |||||
f.write(preview_msg) | |||||
self.build_steps.append( | |||||
{ | |||||
"bin": "echo", | |||||
"args": [preview_msg], | |||||
} | |||||
) | |||||
# Now we need to schedule a job to stop or kill the container after | # Now we need to schedule a job to stop or kill the container after | ||||
# the timeout expires. | # the timeout expires. | ||||
script_file = self.build_directory.joinpath("docker_timeout.sh") | script_file = self.build_directory.joinpath("docker_timeout.sh") | ||||
self.create_script_file( | self.create_script_file( | ||||
script_file, | script_file, | ||||
f'echo "docker stop {tag_name}" | at now +{timeout_minutes} minutes') | f'echo "docker stop {tag_name}" | at now +{timeout_minutes} minutes') | ||||
self.build_steps.append( | self.build_steps.append( | ||||
▲ Show 20 Lines • Show All 138 Lines • ▼ Show 20 Lines | def __init__(self, configuration): | ||||
"build.clean.log") | "build.clean.log") | ||||
self.logs["full_log"] = build_directory.joinpath("build.full.log") | self.logs["full_log"] = build_directory.joinpath("build.full.log") | ||||
# Clean the build directory before any build step is run. | # Clean the build directory before any build step is run. | ||||
if self.configuration.build_directory.is_dir(): | if self.configuration.build_directory.is_dir(): | ||||
shutil.rmtree(self.configuration.build_directory) | shutil.rmtree(self.configuration.build_directory) | ||||
self.configuration.build_directory.mkdir(exist_ok=True, parents=True) | self.configuration.build_directory.mkdir(exist_ok=True, parents=True) | ||||
self.preview_url = build_directory.joinpath("preview_url.log") | |||||
self.ip_address = '127.0.0.1' | |||||
def copy_artifacts(self, artifacts): | def copy_artifacts(self, artifacts): | ||||
# Make sure the artifact directory always exists. It is created before | # Make sure the artifact directory always exists. It is created before | ||||
# the build is run (to let the build install things to it) but since we | # the build is run (to let the build install things to it) but since we | ||||
# have no control on what is being executed, it might very well be | # have no control on what is being executed, it might very well be | ||||
# deleted by the build as well. This can happen when the artifacts | # deleted by the build as well. This can happen when the artifacts | ||||
# are located in the build directory and the build calls git clean. | # are located in the build directory and the build calls git clean. | ||||
self.artifact_dir.mkdir(exist_ok=True) | self.artifact_dir.mkdir(exist_ok=True) | ||||
▲ Show 20 Lines • Show All 124 Lines • ▼ Show 20 Lines | async def wait_for_build(self, timeout, args=None): | ||||
# Always add the build logs to the root of the artifacts | # Always add the build logs to the root of the artifacts | ||||
artifacts = { | artifacts = { | ||||
**self.configuration.get("artifacts", {}), | **self.configuration.get("artifacts", {}), | ||||
str(self.logs["full_log"].relative_to(build_directory)): "", | str(self.logs["full_log"].relative_to(build_directory)): "", | ||||
str(self.logs["clean_log"].relative_to(build_directory)): "", | str(self.logs["clean_log"].relative_to(build_directory)): "", | ||||
str(self.configuration.junit_reports_dir.relative_to(build_directory)): "", | str(self.configuration.junit_reports_dir.relative_to(build_directory)): "", | ||||
str(self.configuration.test_logs_dir.relative_to(build_directory)): "", | str(self.configuration.test_logs_dir.relative_to(build_directory)): "", | ||||
str(self.configuration.functional_test_logs.relative_to(build_directory)): "functional", | str(self.configuration.functional_test_logs.relative_to(build_directory)): "functional", | ||||
str(self.preview_url.relative_to(build_directory)): "", | |||||
} | } | ||||
self.copy_artifacts(artifacts) | self.copy_artifacts(artifacts) | ||||
return (return_code, message) | return (return_code, message) | ||||
def run(self, args=None):
    """Prepare the artifact directory, generate the build steps and run the build.

    Returns:
        (return_code, message) tuple describing the build outcome.
    """
    # Treat a missing argument list as empty; never mutate a shared default.
    args = [] if args is None else args

    # Start every run from a clean artifact directory.
    if self.artifact_dir.is_dir():
        shutil.rmtree(self.artifact_dir)
    self.artifact_dir.mkdir(exist_ok=True)

    # The preview URL log file and the agent IP address are forwarded so the
    # docker-based configurations can advertise where the preview is served.
    self.configuration.create_build_steps(
        self.artifact_dir, self.preview_url, self.ip_address)

    # Drive the asynchronous build to completion, bounded by the configured
    # timeout (falls back to the module-level default).
    timeout = self.configuration.get("timeout", DEFAULT_TIMEOUT)
    return_code, message = asyncio.run(self.wait_for_build(timeout))

    return (return_code, message)
class TeamcityBuild(UserBuild): | class TeamcityBuild(UserBuild): | ||||
def __init__(self, configuration):
    """Teamcity-specific build: remaps the artifact path for the agent's
    volume mapping and wires up teamcity service messages."""
    super().__init__(configuration)

    # This accounts for the volume mapping from the container.
    # Our local /results is mapped to some relative ./results on the host,
    # so we use /results/artifacts to copy our files but results/artifacts as
    # an artifact path for teamcity.
    # TODO abstract out the volume mapping
    self.artifact_dir = Path("/results/artifacts")

    self.teamcity_messages = TeamcityServiceMessages()

    # Only gather the public IP if we are running on a TC build agent.
    # Imported lazily so non-teamcity (user) builds never need the
    # third-party dependency.
    from whatismyip import whatismyip
    self.ip_address = whatismyip()
def copy_artifacts(self, artifacts): | def copy_artifacts(self, artifacts): | ||||
super().copy_artifacts(artifacts) | super().copy_artifacts(artifacts) | ||||
# Start loading the junit reports. | # Start loading the junit reports. | ||||
junit_reports_pattern = f"{str(self.artifact_dir.relative_to('/'))}/junit/*.xml" | junit_reports_pattern = f"{str(self.artifact_dir.relative_to('/'))}/junit/*.xml" | ||||
self.teamcity_messages.importData("junit", junit_reports_pattern) | self.teamcity_messages.importData("junit", junit_reports_pattern) | ||||
# Instruct teamcity to upload our artifact directory | # Instruct teamcity to upload our artifact directory | ||||
▲ Show 20 Lines • Show All 83 Lines • Show Last 20 Lines |