diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..beba5119 --- /dev/null +++ b/.gitignore @@ -0,0 +1,44 @@ +# Copyright (c) 2012-2014 LG Electronics, Inc. + +# A leading / matches only entries in the top-level directory +# A trailing / matches only directories +# A trailing / is not added to directories which are often replaced with symlink + +# Common Eclipse project files +.project +.cproject +.pc + +# Editor backup files +*~ + +# Build products worth keeping between clean builds +/sstate-cache +/buildhistory +/downloads +/cache + +# Other build products +/BUILD/ +/BUILD-ARTIFACTS/ +/Makefile +TAGS +/bitbake.lock +/mcf.status +/oe-init-build-env +/conf/ +__pycache__/ +tmp/ +patches/ + +# Artifacts from generating build_changes.log +build_changes.log +latest_project_baselines*txt + +# Checkouts managed by mcf +/bitbake +/oe-core +/meta-* + +# Local override file +/webos-local.conf diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..004ec5b2 --- /dev/null +++ b/NOTICE @@ -0,0 +1,3 @@ + Copyright (c) 2013 LG Electronics, Inc. + +This software contains code licensed as described in COPYING.MIT diff --git a/README.md b/README.md new file mode 100644 index 00000000..2184a49f --- /dev/null +++ b/README.md @@ -0,0 +1,103 @@ +build-webos +=========== + +Summary +------- +Build webOS OSE (Open Source Edition) images + +Description +----------- +This repository contains the top level code that aggregates the various [OpenEmbedded](http://openembedded.org) layers into a whole from which webOS OSE images can be built. + +Cloning +======= +Set up build-webos by cloning its Git repository: + + git clone https://github.com/webosose/build-webos.git + +Note: If you populate it by downloading an archive (zip or tar.gz file), then you will get the following error when you run mcf: + + fatal: Not a git repository (or any parent up to mount parent). + Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYTEM not set). 
+ + +Prerequisites +============= +Before you can build, you will need some tools. If you try to build without them, bitbake will fail a sanity check and tell you what's missing, but not really how to get the missing pieces. On Ubuntu, you can force all of the missing pieces to be installed by entering: + + $ sudo scripts/prerequisites.sh + +Also, the bitbake sanity check will issue a warning if you're not running under Ubuntu 14.04 64bit LTS. + + +Building +======== +To configure the build for the raspberrypi3 and to fetch the sources: + + $ ./mcf -p 0 -b 0 raspberrypi3 + +The `-p 0` and `-b 0` options set the make and bitbake parallelism values to the number of CPU cores found on your computer. + +To kick off a full build of webOS OSE, make sure you have at least 100GB of disk space available and enter the following: + + $ make webos-image + +This may take in the neighborhood of two hours on a multi-core workstation with a fast disk subsystem and lots of memory, or many more hours on a laptop with less memory and slower disks or in a VM. + + +Images +====== +The following images can be built: + +- `webos-image`: The production webOS OSE image. +- `webos-image-devel`: Adds various development tools to `webos-image`, including gdb and strace. See `packagegroup-core-tools-debug` and `packagegroup-core-tools-profile` in `oe-core` and `packagegroup-webos-test` in `meta-webos` for the complete list. + + +Cleaning +======== +To blow away the build artifacts and prepare to do clean build, you can remove the build directory and recreate it by typing: + + $ rm -rf BUILD + $ ./mcf.status + +What this retains are the caches of downloaded source (under `./downloads`) and shared state (under `./sstate-cache`). These caches will save you a tremendous amount of time during development as they facilitate incremental builds, but can cause seemingly inexplicable behavior when corrupted. 
If you experience strangeness, use the command presented below to remove the shared state of suspicious components. In extreme cases, you may need to remove the entire shared state cache. See [here](http://www.yoctoproject.org/docs/latest/overview-manual/overview-manual.html#shared-state-cache) for more information on it. + + +Building Individual Components +============================== +To build an individual component, enter: + + $ make + +To clean a component's build artifacts under BUILD, enter: + + $ make clean- + +To remove the shared state for a component as well as its build artifacts to ensure it gets rebuilt afresh from its source, enter: + + $ make cleanall- + +Adding new layers +================= +The script automates the process of adding new OE layers to the build environment. The information required to integrate a new layer is: layer name, OE priority, repository, and identification in the form of a branch, commit, or tag id. It is also possible to reference a layer from a local storage area. The details are documented in weboslayers.py. + +Copyright and License Information +================================= +Unless otherwise specified, all content, including all source code files and +documentation files in this repository are: + +Copyright (c) 2008-2018 LG Electronics, Inc. + +Unless otherwise specified or set forth in the NOTICE file, all content, +including all source code files and documentation files in this repository are: +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this content except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/build-templates/Makefile.in b/build-templates/Makefile.in new file mode 100644 index 00000000..67652263 --- /dev/null +++ b/build-templates/Makefile.in @@ -0,0 +1,76 @@ +# DO NOT MODIFY! This script is generated by mcf. Changes made +# here will be lost. The source for this file is in build-templates/Makefile.in. +# +# Copyright (c) 2008-2014 LG Electronics, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +srcdir := @srcdir@ +machines := @machines@ + +TIME := time + +webos-image: +all: webos-image + +force:;: + +oe-init-build-env mcf.status Makefile conf/local.conf conf/bblayers.conf: \ + $(srcdir)/build-templates/oe-init-build-env.in \ + $(srcdir)/build-templates/mcf-status.in \ + $(srcdir)/build-templates/Makefile.in \ + $(srcdir)/build-templates/local-conf.in \ + $(srcdir)/build-templates/bblayers-conf.in + ./mcf.status + +# everything else is already set by oe-init-build-env +BITBAKE := . 
$(srcdir)/oe-init-build-env && bitbake + +### intended for command line use +BBFLAGS = + +%:; for MACHINE in $(machines) ; do $(BITBAKE) $(BBFLAGS) $*; done + +define convenience +$(1)-$(2)-%:; export MACHINE=$(1) && $(TIME) $(MAKE) $(2)-$$* +$(1)-%:; for MACHINE in $(machines) ; do $$(BITBAKE) $(BBFLAGS) -c $(1) $$*; done +endef + +conveniences := \ + clean \ + cleanall \ + cleansstate \ + compile \ + configure \ + fetch \ + fetchall \ + install \ + listtasks \ + package \ + patch \ + patchall \ + unpack \ + unpackall \ + +# Used by _bitbake() in bitbake-bash_completion +_display_conveniences: + @echo $(conveniences) + +$(foreach c, $(conveniences),$(eval $(call convenience,$(c)))) + +# In most cases, "install-foo" is a coded request +# for 'bitbake -c install foo'. However, at least one component has a +# name prefixed by "install-". Hence the need for the "just-" target +# which lets us name "just-install-first" in order to request +# "install-first". (Yes, I'm sorry it's complicated.) +just-%:; $(BITBAKE) $(BBFLAGS) $* diff --git a/build-templates/bblayers-conf.in b/build-templates/bblayers-conf.in new file mode 100644 index 00000000..5eac3acf --- /dev/null +++ b/build-templates/bblayers-conf.in @@ -0,0 +1,45 @@ +# Copyright (c) 2008-2017 LG Electronics, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +# LAYER_CONF_VERSION is increased each time build/conf/bblayers.conf +# changes incompatibly +LCONF_VERSION = "7" + +BBPATH = "${TOPDIR}" + +# Pull in two optional configuration files to allow the user to override +# component source directories, shared state locations, etc. +# +# webos-global.conf (in the user's home directory) applies overrides to +# all clones of openwebos/build-webos in the user's account. +# +# webos-local.conf resides at the top of the build-webos repo and applies +# overrides on a per-repo basis. +# +# Including both here saves the user remembering to chain to the local +# file from the global one, avoids them forgetting to do so, and makes +# the existence of a global override file optional. +# +# The location of the shared-state cache can be moved by overriding +# DL_DIR and SSTATE_DIR. +# +# The meta-webos layer can be moved out-of-tree by overriding WEBOS_LAYER. +# Note that running mcf will still clone and checkout a meta-webos directory +# in the root of the repo, but "make" will ignore it and use the overridden +# location for recipes etc. The first time you move a meta-webos layer out of +# tree may invalidate your shared state information, as a result of recloning +# the meta-webos layer. + +# NOTE: directory names META_*LAYER are used in scripts/bitbake.bash_completion diff --git a/build-templates/local-conf.in b/build-templates/local-conf.in new file mode 100644 index 00000000..6194fc39 --- /dev/null +++ b/build-templates/local-conf.in @@ -0,0 +1,492 @@ +# DO NOT MODIFY! This script is generated by configure. Changes made +# here will be lost. Source for this file is in local-conf.in. + +# Copyright (c) 2008-2017 LG Electronics, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This file is your local configuration file and is where all local user settings +# are placed. The comments in this file give some guide to the options a new user +# to the system might want to change but pretty much any configuration option can +# be set in this file. More adventurous users can look at local.conf.extended +# which contains other examples of configuration which can be placed in this file +# but new users likely won't need any of them initially. +# +# Lines starting with the '#' character are commented out and in some cases the +# default values are provided as comments to show people example syntax. Enabling +# the option is a question of removing the # character and making any change to the +# variable as required. +# +# export DISTRO variable in your shell environment +# +# This sets the default distro to be whatever is selected by mcf if no other distro +# is selected: +DISTRO ??= "@distro@" + +# +# Parallelism Options +# +# These two options control how much parallelism BitBake should use. The first +# option determines how many tasks bitbake should run in parallel. The default +# value of 2x the number of cores is that recommended by OE -- see: +# http://www.yoctoproject.org/docs/1.4/ref-manual/ref-manual.html#var-BB_NUMBER_THREADS +# This may appear counter-intuitive, but that this is a good choice was confirmed +# by testing whose results are reported here: http://wiki.lgsvl.com/x/QYW0BQ and +# in [ES-469]. 
+# +BB_NUMBER_THREADS ?= "${@ @bb_number_threads@ if @bb_number_threads@ != 0 else 2*int(bb.utils.cpu_count())}" + +# +# The second option controls how many processes make should run in parallel when +# running compile tasks. The default value of 2x the number of cores is that +# recommended by OE -- see: +# http://www.yoctoproject.org/docs/1.4/ref-manual/ref-manual.html#var-BB_NUMBER_THREADS +# This may appear counter-intuitive, but that this is a good choice was confirmed +# by testing whose results are reported here: http://wiki.lgsvl.com/x/QYW0BQ and +# in [ES-469]. +# +PARALLEL_MAKE ?= "-j ${@ @parallel_make@ if @parallel_make@ != 0 else 2*int(bb.utils.cpu_count())}" + +# +# Building with icecc is enabled by default. The following configures webOS image +# to build with icecc +# +# To disable ICECC, set ICECC_DISABLED to 1 +ICECC_DISABLED ?= "@icecc_disable_enable@" +# Number of parallel make threads +ICECC_PARALLEL_MAKE ?= "-j ${@ @icecc_parallel_make@ if @icecc_parallel_make@ != 0 else 4*int(bb.utils.cpu_count())}" +# To use an alternative icecc installation, specify the location of the icecc executable below. +@alternative_icecc_installation@ +# To exclude components/recipes from icecc build add its name to the space separated list below +@icecc_user_package_blacklist@ +# To exclude class of components/recipes from icecc build add its name to the space separated list below; i.e. native +@icecc_user_class_blacklist@ +# To force components/recipes to build with icecc, add its name to space separated list below +@icecc_user_package_whitelist@ +# To use an alternative script for icecc environment setup, specify the script below. +@icecc_environment_script@ + +# +# Machine Selection +# +# You need to select a specific machine to target the build with. 
There are a selection +# of emulated machines available which can boot and run in the QEMU emulator: +# +# export MACHINE variable in your shell environment +# +# This sets the default machine to be qemux86 if no other machine is selected: +MACHINE ??= "qemux86" + +# +# Where to place downloads +# +# During a first build the system will download many different source code tarballs +# from various upstream projects. This can take a while, particularly if your network +# connection is slow. These are all stored in DL_DIR. When wiping and rebuilding you +# can preserve this directory to speed up this part of subsequent builds. This directory +# is safe to share between multiple builds on the same machine too. +# +# The default is a downloads directory under TOPDIR which is the build directory. +# +DL_DIR ?= "${TOPDIR}/downloads" + +# +# Where to place shared-state files +# +# BitBake has the capability to accelerate builds based on previously built output. +# This is done using "shared state" files which can be thought of as cache objects +# and this option determines where those files are placed. +# +# You can wipe out TMPDIR leaving this directory intact and the build would regenerate +# from these files if no changes were made to the configuration. If changes were made +# to the configuration, only shared state files where the state was still valid would +# be used (done using checksums). +# +# The default is a sstate-cache directory under TOPDIR. +# +SSTATE_DIR ?= "${TOPDIR}/sstate-cache" + +# +# Where to place the build output +# +# This option specifies where the bulk of the building work should be done and +# where BitBake should place its temporary files and output. Keep in mind that +# this includes the extraction and compilation of many applications and the toolchain +# which can use Gigabytes of hard disk space. +# +# The default is a tmp directory under TOPDIR. 
+# +TMPDIR = "${TOPDIR}/BUILD" + + +# +# Package Management configuration +# +# This variable lists which packaging formats to enable. Multiple package backends +# can be enabled at once and the first item listed in the variable will be used +# to generate the root filesystems. +# Options are: +# - 'package_deb' for debian style deb files +# - 'package_ipk' for ipk files used by opkg (a debian style embedded package manager) +# - 'package_rpm' for rpm style packages +# E.g.: PACKAGE_CLASSES ?= "package_rpm package_deb package_ipk" +# We default to ipk: +PACKAGE_CLASSES ?= "package_ipk" + +# +# SDK/ADT target architecture +# +# This variable specifies the architecture to build SDK/ADT items for and means +# you can build the SDK packages for architectures other than the machine you are +# running the build on (i.e. building i686 packages on an x86_64 host). +# Supported values are i686 and x86_64 +# +# Using ??= so that it can be overridden in webos-local.conf without having to +# resort to using _forcevariable. +SDKMACHINE ??= "${BUILD_ARCH}" + +# +# Extra image configuration defaults +# +# The EXTRA_IMAGE_FEATURES variable allows extra packages to be added to the generated +# images. Some of these options are added to certain image types automatically. The +# variable can contain the following options: +# "dbg-pkgs" - add -dbg packages for all installed packages +# (adds symbol information for debugging/profiling) +# "dev-pkgs" - add -dev packages for all installed packages +# (useful if you want to develop against libs in the image) +# "tools-sdk" - add development tools (gcc, make, pkgconfig etc.) +# "tools-debug" - add debugging tools (gdb, strace) +# "tools-profile" - add profiling tools (oprofile, exmap, lttng valgrind (x86 only)) +# "tools-testapps" - add useful testing tools (ts_print, aplay, arecord etc.) +# "debug-tweaks" - make an image suitable for development +# e.g. 
ssh root access has a blank password +# There are other application targets that can be used here too, see +# meta/classes/image.bbclass and meta/classes/core-image.bbclass for more details. +EXTRA_IMAGE_FEATURES ?= "" + +# +# Additional image features +# +# The following is a list of additional classes to use when building images which +# enable extra features. Some available options which can be included in this variable +# are: +# - 'buildhistory' collect statistics from build artifacts +# - 'buildstats' collect build statistics +# - 'image-mklibs' to reduce shared library files size for an image +# - 'image-prelink' in order to prelink the filesystem image +# - 'image-swab' to perform host system intrusion detection +# NOTE: if listing mklibs & prelink both, then make sure mklibs is before prelink +# NOTE: mklibs also needs to be explicitly enabled for a given image, see local.conf.extended +USER_CLASSES ?= "@buildhistory_class@ buildstats image-mklibs" + +BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory" +BUILDHISTORY_COMMIT ?= "@buildhistory_enabled@" +@buildhistory_author_assignment@ + +# +# Runtime testing of images +# +# The build system can test booting virtual machine images under qemu (an emulator) +# after any root filesystems are created and run tests against those images. To +# enable this uncomment this line +#IMAGETEST = "qemu" +# +# This variable controls which tests are run against virtual images if enabled +# above. The following would enable bat, boot the test case under the sanity suite +# and perform toolchain tests +#TEST_SCEN = "sanity bat sanity:boot toolchain" +# +# Because of the QEMU booting slowness issue (see bug #646 and #618), the +# autobuilder may suffer a timeout issue when running sanity tests. We introduce +# the variable TEST_SERIALIZE here to reduce the time taken by the sanity tests. +# It is set to 1 by default, which will boot the image and run cases in the same +# image without rebooting or killing the machine instance. 
If it is set to 0, the +# image will be copied and tested for each case, which will take longer but be +# more precise. +#TEST_SERIALIZE = "1" + +# +# Interactive shell configuration +# +# Under certain circumstances the system may need input from you and to do this it +# can launch an interactive shell. It needs to do this since the build is +# multithreaded and needs to be able to handle the case where more than one parallel +# process may require the user's attention. The default is iterate over the available +# terminal types to find one that works. +# +# Examples of the occasions this may happen are when resolving patches which cannot +# be applied, to use the devshell or the kernel menuconfig +# +# Supported values are auto, gnome, xfce, rxvt, screen, konsole (KDE 3.x only), none +# Note: currently, Konsole support only works for KDE 3.x due to the way +# newer Konsole versions behave +#OE_TERMINAL = "auto" +# By default disable interactive patch resolution (tasks will just fail instead): +PATCHRESOLVE = "noop" + +# PREMIRROR? +@premirror_assignment@ +@premirror_inherit@ + +# network or no +BB_NO_NETWORK := "@no_network@" + +# premirror only? +BB_FETCH_PREMIRRORONLY := "@fetchpremirroronly@" + +# mirror tarballs +BB_GENERATE_MIRROR_TARBALLS := "@generatemirrortarballs@" + +# +# Shared-state files from other locations +# +# As mentioned above, shared state files are prebuilt cache data objects which can +# used to accelerate build time. This variable can be used to configure the system +# to search other mirror locations for these objects before it builds the data itself. +# +# This can be a filesystem directory, or a remote url such as http or ftp. These +# would contain the sstate-cache results from previous builds (possibly from other +# machines). This variable works like fetcher MIRRORS/PREMIRRORS and points to the +# cache locations to check for the shared objects. 
+#SSTATE_MIRRORS ?= "\ +#file://.* http://someserver.tld/share/sstate/PATH \n \ +#file://.* file:///some/local/dir/sstate/PATH" +@sstatemirror_assignment@ + +# CONF_VERSION is increased each time build/conf/ changes incompatibly and is used to +# track the version of this file when it was generated. This can safely be ignored if +# this doesn't mean anything to you. +CONF_VERSION = "1" + +# local.conf.sample.extended starts here. +# BBMASK is a regular expression that can be used to tell BitBake to ignore +# certain recipes. +#BBMASK = "" + +# eglibc configurability is used to reduce minimal image's size. +# the all supported eglibc options are listed in DISTRO_FEATURES_LIBC +# and disabled by default. Uncomment and copy the DISTRO_FEATURES_LIBC +# and DISTRO_FEATURES definitions to local.conf to enable the options. +#DISTRO_FEATURES_LIBC = "ipv6 libc-backtrace libc-big-macros libc-bsd libc-cxx-tests libc-catgets libc-charsets libc-crypt \ +# libc-crypt-ufc libc-db-aliases libc-envz libc-fcvt libc-fmtmsg libc-fstab libc-ftraverse \ +# libc-getlogin libc-idn libc-inet libc-inet-anl libc-libm libc-libm-big libc-locales libc-locale-code \ +# libc-memusage libc-nis libc-nsswitch libc-rcmd libc-rtld-debug libc-spawn libc-streams libc-sunrpc \ +# libc-utmp libc-utmpx libc-wordexp libc-posix-clang-wchar libc-posix-regexp libc-posix-regexp-glibc \ +# libc-posix-wchar-io" + +#DISTRO_FEATURES = "alsa bluetooth ext2 irda pcmcia usbgadget usbhost wifi nfs zeroconf pci ${DISTRO_FEATURES_LIBC}" + +# If you want to get an image based on gtk+directfb without x11, Please copy this variable to build/conf/local.conf +#DISTRO_FEATURES = "alsa argp bluetooth ext2 irda largefile pcmcia usbgadget usbhost wifi xattr nfs zeroconf pci 3g directfb ${DISTRO_FEATURES_LIBC}" + +# ENABLE_BINARY_LOCALE_GENERATION controls the generation of binary locale +# packages at build time using qemu-native. 
Disabling it (by setting it to 0) +# will save some build time at the expense of breaking i18n on devices with +# less than 128MB RAM. +#ENABLE_BINARY_LOCALE_GENERATION = "1" + +# Set GLIBC_GENERATE_LOCALES to the locales you wish to generate should you not +# wish to perform the time-consuming step of generating all LIBC locales. +# NOTE: If removing en_US.UTF-8 you will also need to uncomment, and set +# appropriate values for IMAGE_LINGUAS and LIMIT_BUILT_LOCALES +# WARNING: this may break localisation! +#GLIBC_GENERATE_LOCALES = "en_GB.UTF-8 en_US.UTF-8" +# See message above as to whether setting these is required +#IMAGE_LINGUAS ?= "en-gb" +#LIMIT_BUILT_LOCALES ?= "POSIX en_GB" + +# The following are used to control options related to debugging. +# +# Uncomment this to change the optimization to make debugging easer, at the +# possible cost of performance. +# DEBUG_BUILD = "1" +# +# Uncomment this to disable the stripping of the installed binaries +# INHIBIT_PACKAGE_STRIP = "1" +# +# Uncomment this to disable the split of the debug information into -dbg files +# INHIBIT_PACKAGE_DEBUG_SPLIT = "1" +# +# When splitting debug information, the following controls the results of the +# file splitting. +# +# .debug (default): +# When splitting the debug information will be placed into +# a .debug directory in the same dirname of the binary produced: +# /bin/foo -> /bin/.debug/foo +# +# debug-file-directory: +# When splitting the debug information will be placed into +# a central debug-file-directory, /usr/lib/debug: +# /bin/foo -> /usr/lib/debug/bin/foo.debug +# +# Any source code referenced in the debug symbols will be copied +# and made available within the /usr/src/debug directory +# +#PACKAGE_DEBUG_SPLIT_STYLE = '.debug' +# PACKAGE_DEBUG_SPLIT_STYLE = 'debug-file-directory' + +# Uncomment these to build a package such that you can use gprof to profile it. 
+# NOTE: This will only work with 'linux' targets, not +# 'linux-uclibc', as uClibc doesn't provide the necessary +# object files. Also, don't build glibc itself with these +# flags, or it'll fail to build. +# +# PROFILE_OPTIMIZATION = "-pg" +# SELECTED_OPTIMIZATION = "${PROFILE_OPTIMIZATION}" +# LDFLAGS =+ "-pg" + +# TCMODE controls the characteristics of the generated packages/images by +# telling poky which toolchain 'profile' to use. +# +# The default is "default" +# Use "external-MODE" to use the precompiled external toolchains where MODE +# is the type of external toolchain to use e.g. eabi. You need to ensure +# the toolchain you want to use is included in an appropriate layer +# TCMODE = "external-eabi" + +# mklibs library size optimization is more useful to smaller images, +# and less useful for bigger images. Also mklibs library optimization +# can break the ABI compatibility, so should not be applied to the +# images which are to be extended or upgraded later. +#This enabled mklibs library size optimization just for the specified image. +#MKLIBS_OPTIMIZED_IMAGES ?= "core-image-minimal" +#This enable mklibs library size optimization will be for all the images. +#MKLIBS_OPTIMIZED_IMAGES ?= "all" + +# Uncomment this if your host distribution provides the help2man tool. +#ASSUME_PROVIDED += "help2man-native" + +# This value is currently used by pseudo to determine if the recipe should +# build both the 32-bit and 64-bit wrapper libraries on a 64-bit build system. +# +# Pseudo will attempt to determine if a 32-bit wrapper is necessary, but +# it doesn't always guess properly. If you have 32-bit executables on +# your 64-bit build system, you likely want to set this to "0", +# otherwise you could end up with incorrect file attributes on the +# target filesystem. 
+# +# Default is to not build 32 bit libs on 64 bit systems, uncomment this +# if you need the 32 bits libs +#NO32LIBS = "0" + +# Uncomment the following lines to enable multilib builds +#require conf/multilib.conf +#MULTILIBS = "multilib:lib32" +#DEFAULTTUNE_virtclass-multilib-lib32 = "x86" + +# The network based PR service host and port +# Uncomment the following lines to enable PRservice. +# Set PRSERV_HOST to 'localhost' and PRSERV_PORT to '0' to automatically +# start local PRService. +# Set to other values to use remote PRService. +#PRSERV_HOST = "localhost" +#PRSERV_PORT = "0" + +# Additional image generation features +# +# The following is a list of classes to import to use in the generation of images +# currently an example class is image_types_uboot +# IMAGE_CLASSES = " image_types_uboot" + +# Incremental rpm image generation, the rootfs would be totally removed +# and re-created in the second generation by default, but with +# INC_RPM_IMAGE_GEN = "1", the rpm based rootfs would be kept, and will +# do update(remove/add some pkgs) on it. NOTE: This is not suggested +# when you want to create a productive rootfs +#INC_RPM_IMAGE_GEN = "1" + +# This is a list of packages that require a commercial license to ship +# product. If shipped as part of an image these packages may have +# implications so they are disabled by default. To enable them, +# un-comment the below as appropriate. +#LICENSE_FLAGS_WHITELIST = "commercial_gst-fluendo-mp3 \ +# commercial_gst-openmax \ +# commercial_gst-plugins-ugly \ +# commercial_lame \ +# commercial_libmad \ +# commercial_libomxil \ +# commercial_mpeg2dec \ +# commercial_qmmp" + + +# +# Disk space monitor, take action when the disk space or the amount of +# inode is running low, it is enabled when BB_DISKMON_DIRS is set. +# +# Set the directories to monitor for disk usage, if more than one +# directories are mounted in the same device, then only one directory +# would be monitored since the monitor is based on the device. 
+# The format is: +# "action,directory,minimum_space,minimum_free_inode" +# +# The "action" must be set and should be one of: +# ABORT: Immediately abort +# STOPTASKS: The new tasks can't be executed any more, will stop the build +# when the running tasks have been done. +# WARN: show warnings (see BB_DISKMON_WARNINTERVAL for more information) +# +# The "directory" must be set, any directory is OK. +# +# Either "minimum_space" or "minimum_free_inode" (or both of them) +# should be set, otherwise the monitor would not be enabled, +# the unit can be G, M, K or none, but do NOT use GB, MB or KB +# (B is not needed). +#BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K" +# +# Set disk space and inode interval (only works when the action is "WARN", +# the unit can be G, M, or K, but do NOT use the GB, MB or KB +# (B is not needed), the format is: +# "disk_space_interval,disk_inode_interval", the default value is +# "50M,5K" which means that it would warn when the free space is +# lower than the minimum space(or inode), and would repeat the warning +# when the disk space reduces 50M (or the amount of inode reduces 5k). +#BB_DISKMON_WARNINTERVAL = "50M,5K" + +# Archiving source code configuration +# +# The following variables control which files to archive and the type to archive to generate. 
+# There are three basic class defintions of common operations that might be desired and these +# can be enabled by uncommenting one of the following lines: +# +# INHERIT += "archive-original-source" +# INHERIT += "archive-patched-source" +#INHERIT =+ "archive-configured-source" +# +# Type of archive: +# SOURCE_ARCHIVE_PACKAGE_TYPE = 'srpm' +#SOURCE_ARCHIVE_PACKAGE_TYPE ?= 'tar' +# +# Whether to include WORKDIR/temp, .bb and .inc files: +# 'logs_with_scripts' include WORKDIR/temp directory and .bb and .inc files +# 'logs' only include WORKDIR/temp +# ARCHIVER_MODE[log_type] = "logs logs_with_scripts" +# There are three basic class defintions of common operations +# that might be desired and these can be enabled by +# uncommenting five of the following lines: +# ARCHIVER_MODE[filter] ?= "yes no" +# Filter packages according to license +#ARCHIVER_MODE ?= "original" +#ARCHIVER_MODE[type] ?= "tar" +#ARCHIVER_MODE[log_type] ?= "logs_with_scripts" +#ARCHIVER_MODE[filter] ?= "no" +#ARCHIVER_CLASS = "${@'archive-${ARCHIVER_MODE}-source' if ARCHIVER_MODE != 'none' else ''}" +#INHERIT += "${ARCHIVER_CLASS}" + +# Other local settings +TCLIBCAPPEND := "" + +include ${HOME}/webos-global.conf +include ${TOPDIR}/webos-local.conf diff --git a/build-templates/mcf-status.in b/build-templates/mcf-status.in new file mode 100644 index 00000000..0eca114c --- /dev/null +++ b/build-templates/mcf-status.in @@ -0,0 +1,44 @@ +#!/bin/sh + +# DO NOT MODIFY! This script is generated by @prog@. Changes made +# here will be lost. Source for this file can be found in +# mcf-status.in. + +## +# Copyright (c) 2008-2014 LG Electronics, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +# Run this file to recreate the current configuration. + +set -e + +exec @prog@ \ + --enable-bb-number-threads=@bb_number_threads@ \ + --enable-parallel-make=@parallel_make@ \ + @icecc_disable_enable_mcf@ \ + --enable-icecc-parallel-make=@icecc_parallel_make@ \ + --enable-icecc-location=@alternative_icecc_installation_mcf@ \ + --enable-icecc-user-package-blacklist=@icecc_user_package_blacklist_mcf@ \ + --enable-icecc-user-class-blacklist=@icecc_user_class_blacklist_mcf@ \ + --enable-icecc-user-package-whitelist=@icecc_user_package_whitelist_mcf@ \ + --enable-icecc-env-exec=@icecc_environment_script_mcf@ \ + --premirror=@premirror@ \ + --sstatemirror=@sstatemirror@ \ + @buildhistory@ \ + --enable-buildhistoryauthor='@buildhistoryauthor@' \ + @network@ \ + @fetchpremirroronlyoption@ \ + @generatemirrortarballsoption@ \ + @machines@ diff --git a/build-templates/oe-init-build-env.in b/build-templates/oe-init-build-env.in new file mode 100644 index 00000000..4724c9ae --- /dev/null +++ b/build-templates/oe-init-build-env.in @@ -0,0 +1,49 @@ +# DO NOT MODIFY! This script is generated by mcf. Changes made +# here will be lost. The source for this file is in build-templates/oe-init-build-env.in. + +# Copyright (c) 2008-2014 LG Electronics, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##
+# See NOV-120063 for an example of how a user's LC_ALL setting can cause problems.
+# FIXME: not clear if these are still needed with oe-core
+unset LC_ALL; export LC_ALL
+export LANG=en_US.UTF-8
+
+export TOPDIR=@abs_srcdir@
+
+BITBAKEPATH=${TOPDIR}/bitbake/bin
+case "${PATH}" in
+*${BITBAKEPATH}*) ;;
+*) export PATH=${TOPDIR}/oe-core/scripts:${BITBAKEPATH}:$PATH ;;
+esac
+
+if [ -z "$ZSH_NAME" ] && [ "$0" = "./oe-init-build-env" ]; then
+    echo "ERROR: This script needs to be sourced. Please run as '. ./oe-init-build-env'"
+    exit 1
+fi
+
+# used in runqemu bitbake wrapper for pseudodone location
+export BUILDDIR="${TOPDIR}/BUILD"
+export BB_ENV_EXTRAWHITE="MACHINE DISTRO TCMODE TCLIBC http_proxy ftp_proxy https_proxy all_proxy ALL_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY SDKMACHINE BB_NUMBER_THREADS PARALLEL_MAKE GIT_PROXY_COMMAND GIT_PROXY_IGNORE SOCKS5_PASSWD SOCKS5_USER WEBOS_DISTRO_BUILD_ID PSEUDO_DISABLED PSEUDO_BUILD"
+
+[ -z "${MACHINES}" ] && MACHINES="@machines@"
+[ -z "${MACHINE}" ] && MACHINE="@machine@"
+[ -z "${DISTRO}" ] && DISTRO="@distro@"
+
+export MACHINES MACHINE DISTRO
+echo "Altered environment for ${MACHINE}@${DISTRO} development"
+
+# bash-completion for bitbake
+[ -n "$BASH_VERSION" ] && . ${TOPDIR}/scripts/bitbake-bash_completion || true
diff --git a/mcf b/mcf
new file mode 100755
index 00000000..463659aa
--- /dev/null
+++ b/mcf
@@ -0,0 +1,906 @@
+#!/usr/bin/env python3
+# Copyright (c) 2008-2017 LG Electronics, Inc.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import errno +import logging +import os +import subprocess +import sys +import re +from time import gmtime, strftime, sleep +import shutil +import glob + +__version__ = "6.2.2" + +logger = logging.getLogger(__name__) + +CLEAN = False +TRACE = False +REMOTE = "origin" +SSTATE_MIRRORS = '' +LAYERS = {} +DISTRO = None +SUPPORTED_MACHINES = [] + +def echo_check_call(todo, verbosity=False): + if verbosity or TRACE: + cmd = 'set -x; ' + todo + else: + cmd = todo + + logger.debug(cmd) + + return str(subprocess.check_output(cmd, shell=True), encoding='utf-8', errors='strict') + +def enable_trace(): + global TRACE + TRACE = True + +def enable_clean(): + logger.warn('Running in clean non-interactive mode, all possible local changes and untracked files will be removed') + global CLEAN + CLEAN = True + +def set_log_level(level): + logger = logging.getLogger(__name__) + logger.setLevel(logging.DEBUG) + f = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') + + s = logging.StreamHandler() + s.setLevel(level) + + s.setFormatter(f) + logging.getLogger('').addHandler(s) + +# Essentially, mcf parses options, creates mcf.status, and runs mcf.status. 
+ +def process_file(f, replacements): + (ifile, ofile) = f + with open(ifile, 'r') as f: + status = f.read() + + for i, j in replacements: + status = status.replace(i, j) + + odir = os.path.dirname(ofile) + if odir and not os.path.isdir(odir): + os.mkdir(odir) + with open(ofile, 'w') as f: + f.write(status) + +def getopts(): + mcfcommand_option = '--command' + mcfcommand_dest = 'mcfcommand' + # be careful when changing this, jenkins-job.sh is doing + # grep "mcfcommand_choices = \['configure', 'update', " + # to detect if it needs to explicitly run --command update after default action + mcfcommand_choices = ['configure', 'update', 'update+configure'] + mcfcommand_default = 'update+configure' + + # Just parse the --command argument here, so that we can select a parser + mcfcommand_parser = argparse.ArgumentParser(add_help=False) + mcfcommand_parser.add_argument(mcfcommand_option, dest=mcfcommand_dest, choices=mcfcommand_choices, default=mcfcommand_default) + mcfcommand_parser_result = mcfcommand_parser.parse_known_args() + mcfcommand = mcfcommand_parser_result[0].mcfcommand + + # Put --command back in (as the first option) so that the main parser sees everything + arglist = [mcfcommand_option, mcfcommand ] + mcfcommand_parser_result[1] + + parser = argparse.ArgumentParser() + + general = parser.add_argument_group('General Options') + + verbosity = general.add_mutually_exclusive_group() + + verbosity.add_argument('-s', '--silent', action='count', help='work silently, repeat the option twice to hide also the warnings, tree times to hide the errors as well') + verbosity.add_argument('-v', '--verbose', action='count', help='work verbosely, repeat the option twice for more debug output') + + general.add_argument('-c', '--clean', dest='clean', action='store_true', default=False, help='clean checkout - WARN: removes all local changes') + general.add_argument('-V', '--version', action='version', version='%(prog)s {0}'.format(__version__), help='print version and exit') + + 
general.add_argument(mcfcommand_option, dest=mcfcommand_dest, choices=mcfcommand_choices, default=mcfcommand_default, + help='command to mcf; if update is given, none of the remaining options nor MACHINE can be specified (default: %(default)s)') + + if mcfcommand in ('configure','update+configure'): + variations = parser.add_argument_group('Build Instructions') + + variations.add_argument('-p', '--enable-parallel-make', dest='parallel_make', type=int, default=0, + help='maximum number of parallel tasks each submake of bitbake should spawn (default: 0 = 2x the number of processor cores)') + + variations.add_argument('-b', '--enable-bb-number-threads', dest='bb_number_threads', type=int, default=0, + help='maximum number of bitbake tasks to spawn (default: 0 = 2x the number of processor cores))') + + icecc = parser.add_argument_group('ICECC Configuration') + + icecc_enable = icecc.add_mutually_exclusive_group() + # This can be changed to enabled by default when ES-1618 is fixed + icecc_enable.add_argument('--enable-icecc', dest='enable_icecc', action='store_true', default=False, + help='enable build to use ICECC, causes the shared state from the build artifacts not to be used (default: False)') + + icecc_enable.add_argument('--disable-icecc', dest='enable_icecc', action='store_false', default=True, + help='disable build from using ICECC (default: True)') + + icecc.add_argument('--enable-icecc-parallel-make', dest='icecc_parallel_make', type=int, default=0, + help='Number of parallel threads for ICECC build (default: 0 = 4x the number of processor cores))') + + icecc_advanced = parser.add_argument_group('ICECC Advanced Configuration') + + icecc_advanced.add_argument('--enable-icecc-user-package-blacklist', dest='icecc_user_package_blacklist', action='append', + help='Space separated list of components/recipes to be excluded from using ICECC (default: None)') + + icecc_advanced.add_argument('--enable-icecc-user-class-blacklist', dest='icecc_user_class_blacklist', 
action='append', + help='Space separated list of components/recipes class to be excluded from using ICECC (default: None)') + + icecc_advanced.add_argument('--enable-icecc-user-package-whitelist', dest='icecc_user_package_whitelist', action='append', + help='Space separated list of components/recipes to be forced to use ICECC (default: None)') + + icecc_advanced.add_argument('--enable-icecc-location', dest='icecc_location', default='', + help='location of ICECC tool (default: None)') + + icecc_advanced.add_argument('--enable-icecc-env-exec', dest='icecc_env_exec', default='', + help='location of ICECC environment script (default: None)') + + + partitions = parser.add_argument_group('Source Identification') + + mirrors = parser.add_argument_group('Networking and Mirrors') + + network = mirrors.add_mutually_exclusive_group() + + network.add_argument('--disable-network', dest='network', action='store_false', default=True, + help='disable fetching through the network (default: False)') + + network.add_argument('--enable-network', dest='network', action='store_true', default=True, + help='enable fetching through the network (default: True)') + + mirrors.add_argument('--sstatemirror', dest='sstatemirror', action='append', + help='set sstatemirror to specified URL, repeat this option if you want multiple sstate mirrors (default: None)') + + premirrorurl = mirrors.add_mutually_exclusive_group() + default_premirror = 'http://downloads.yoctoproject.org/mirror/sources' + premirrorurl.add_argument('--enable-default-premirror', dest='premirror', action='store_const', const=default_premirror, default="", + help='enable default premirror URL (default: False)') + premirrorurl.add_argument('--premirror', '--enable-premirror', dest='premirror', default='', + help='set premirror to specified URL (default: None)') + + premirroronly = mirrors.add_mutually_exclusive_group() + premirroronly.add_argument('--disable-fetch-premirror-only', dest='fetchpremirroronly', action='store_false', 
default=False, + help='disable fetching through the network (default: False)') + + premirroronly.add_argument('--enable-fetch-premirror-only', dest='fetchpremirroronly', action='store_true', default=False, + help='enable fetching through the network (default: True)') + + tarballs = mirrors.add_mutually_exclusive_group() + tarballs.add_argument('--disable-generate-mirror-tarballs', dest='generatemirrortarballs', action='store_false', default=False, + help='disable tarball generation of fetched components (default: True)') + + tarballs.add_argument('--enable-generate-mirror-tarballs', dest='generatemirrortarballs', action='store_true', default=False, + help='generate tarballs suitable for mirroring (default: False)') + + buildhistory = parser.add_argument_group('Buildhistory') + + buildhistory1 = buildhistory.add_mutually_exclusive_group() + + buildhistory1.add_argument('--disable-buildhistory', dest='buildhistory', action='store_false', default=True, + help='disable buildhistory functionality (default: False)') + + buildhistory1.add_argument('--enable-buildhistory', dest='buildhistory', action='store_true', default=True, + help='enable buildhistory functionality (default: True)') + + buildhistory.add_argument('--enable-buildhistoryauthor', dest='buildhistoryauthor', default='', help='specify name and email used in buildhistory git commits (default: none, will use author from git global config)') + + parser.add_argument('MACHINE', nargs='+') + + options = parser.parse_args(arglist) + if mcfcommand in ('configure','update+configure') and options.sstatemirror: + process_sstatemirror_option(options) + return options + +def process_sstatemirror_option(options): + """ + Sets global variable SSTATE_MIRRORS based on list of mirrors in options.sstatemirror + + /PATH suffix is automatically added when generating SSTATE_MIRRORS value + verify that user didn't already include it and show error if he did + """ + sstate_mirrors = '' + for m in options.sstatemirror: + if not m: + 
continue + if m.endswith("/PATH"): + logger.error("sstatemirror entry '%s', already ends with '/PATH', remove that" % m) + sys.exit(1) + if m.endswith("/"): + logger.error("sstatemirror entry '%s', ends with '/', remove that" % m) + sys.exit(1) + if len(m) <= 7: + logger.error("sstatemirror entry '%s', is incorrect, we expect at least 7 characters for protocol" % m) + sys.exit(1) + sstate_mirrors += "file://.* %s/PATH \\n \\\n" % m + if sstate_mirrors: + global SSTATE_MIRRORS + SSTATE_MIRRORS = "SSTATE_MIRRORS ?= \" \\\n%s\"\n" % sstate_mirrors + +def _icecc_installed(): + try: + # Note that if package is not installed following call will throw an exception + iceinstallstatus,iceversion = subprocess.check_output("dpkg-query -W icecc 2>&1" , + shell=True, + universal_newlines=True).split() + # We are expecting icecc for the name + if 'icecc' == iceinstallstatus: + if '1.0.1-1' == iceversion: + return True + else: + logger.warn("WARNING: Wrong icecc package version {} is installed, disabling build from using ICECC.\n".format(iceversion) + \ + "Please check 'How To Install ICECC on Your Workstation (Client)'\n" + \ + "http://wiki.lgsvl.com/pages/viewpage.action?pageId=96175316") + return False + else: + logger.warn('WARNING: ICECC package installation check failed, disabling build from using ICECC.') + return False + + except: + logger.warn('WARNING: ICECC package installation check failed, disabling build from using ICECC.') + return False + +def location_to_dirname(location): + str1 = location.split('/') + return os.path.splitext(str1[len(str1)-1])[0] + +def read_weboslayers(path): + sys.path.insert(0,path) + if not os.path.isfile(os.path.join(path,'weboslayers.py')): + raise Exception("Error: Configuration file %s does not exist!" 
% os.path.join(path,'weboslayers.py')) + + from weboslayers import webos_layers + + for p in webos_layers: + layer = {"name":p[0], "priority":p[1], "url":p[2], "submission":p[3], "location":p[4]} + LAYERS[layer["name"]] = layer + parsesubmissions(layer) + if not layer["url"] and not layer["location"]: + raise Exception("Error: Layer '%s' does not have either URL or alternative working-dir defined in weboslayers.py" % layer["name"]) + if not layer["location"]: + layer["location"] = location_to_dirname(layer["url"]) + + from weboslayers import Distribution + global DISTRO + DISTRO = Distribution + + from weboslayers import Machines + global SUPPORTED_MACHINES + SUPPORTED_MACHINES = Machines + +def parsesubmissions(layer): + branch = '' + commit = '' + tag = '' + for vgit in layer["submission"].split(','): + if not vgit: + continue + str1, str2 = vgit.split('=') + if str1.lower() == 'commit': + if not commit: + commit = str2 + elif str1.lower() == 'branch': + branch = str2 + elif str1.lower() == 'tag': + if not tag: + tag = str2 + + if not branch: + branch = 'master' + + layer["branch_new"] = branch + layer["commit_new"] = commit + layer["tag_new"] = tag + +def wait_for_git_mirror(newcommitid): + repodir=os.getcwd() + cmd = 'git fetch %s %s >&2' % (REMOTE, newcommitid) + success = False + nr_of_retries = 30 + for i in range(1, nr_of_retries+1): + logger.info('MCF-%s: trying to fetch revision %s in %s attempt %s from %s' % (__version__, newcommitid, repodir, i, nr_of_retries)) + try: + if newcommitid.startswith('refs/changes/'): + echo_check_call(cmd) + elif not contains_ref(newcommitid): + echo_check_call('git remote update && git fetch %s --tags' % REMOTE) + success = True + break + except subprocess.CalledProcessError: + sleep(30) + if not success: + logger.error("MCF-%s Cannot checkout %s in %s" % (__version__, newcommitid, repodir)) + sys.exit(1) + +def downloadrepo(layer): + cmd = 'git clone %s %s' % (layer["url"], layer["location"]) + echo_check_call(cmd) + + 
olddir = os.getcwd() + os.chdir(layer["location"]) + newbranch = layer["branch_new"] + + if newbranch: + refbranchlist = echo_check_call("git branch") + refbranch = refbranchlist.splitlines() + foundbranch = False + for ibranch in refbranch: + if newbranch in ibranch: + foundbranch = True + if not foundbranch: + refbranchlist = echo_check_call("git branch -r") + refbranch = refbranchlist.splitlines() + for ibranch in refbranch: + if ibranch == " %s/%s" % (REMOTE, newbranch): + foundbranch = True + logger.info( " found %s " % ibranch ) + cmd ='git checkout -B %s %s' % (newbranch,ibranch) + echo_check_call(cmd) + break + + currentbranch = echo_check_call("git rev-parse --abbrev-ref HEAD").rstrip() + newcommitid = layer["commit_new"] + if newcommitid: + if newcommitid.startswith('refs/changes/'): + wait_for_git_mirror(newcommitid) + if newbranch and newbranch != currentbranch: + # older git doesn't allow to update reference on currently checked out branch + cmd ='git checkout -B %s FETCH_HEAD' % (newbranch) + elif newbranch: + # we're already on requested branch + cmd ='git reset --hard FETCH_HEAD' + else: + # we don't have any branch preference use detached + cmd ='git checkout FETCH_HEAD' + echo_check_call(cmd) + else: + if not contains_ref(newcommitid): + wait_for_git_mirror(newcommitid) + if newbranch and newbranch != currentbranch: + # older git doesn't allow to update reference on currently checked out branch + cmd ='git checkout -B %s %s' % (newbranch,newcommitid) + elif newbranch: + # we're already on requested branch + cmd ='git reset --hard %s' % newcommitid + else: + # we don't have any branch preference use detached + cmd ='git checkout %s' % newcommitid + echo_check_call(cmd) + + newtag = layer["tag_new"] + if newtag: + if newbranch and newbranch != currentbranch: + # older git doesn't allow to update reference on currently checked out branch + cmd ='git checkout -B %s %s' % (newbranch,newtag) + elif newbranch: + # we're already on requested branch + cmd 
='git reset --hard %s' % newtag + else: + cmd ='git checkout %s' % newtag + echo_check_call(cmd) + + os.chdir(olddir) + +def parselayerconffile(layer, layerconffile): + with open(layerconffile, 'r') as f: + lines = f.readlines() + for line in lines: + if re.search( 'BBFILE_COLLECTIONS.*=' , line): + (dummy, collectionname) = line.rsplit('=') + collectionname = collectionname.strip() + collectionname = collectionname.strip("\"") + layer["collection_name"] = collectionname + logger.debug("parselayerconffile(%s,%s) -> %s" % (layer["name"], layerconffile, layer["collection_name"])) + +def traversedir(layer): + for path, dirs, files in os.walk(layer["location"]): + if os.path.basename(os.path.dirname(path)) == layer["name"]: + for filename in files: + if filename == 'layer.conf': + layer["collection_path"] = os.path.relpath(os.path.dirname(path), os.path.dirname(layer["location"])) + logger.debug("traversedir(%s,%s) -> %s" % (layer["name"], layer["location"], layer["collection_path"])) + + layerconffile = os.path.join(path, filename) + parselayerconffile(layer, layerconffile) + break + +def parse_collections(srcdir): + for layer in sorted(LAYERS.values(), key=lambda l: l["priority"]): + if os.path.exists(layer["location"]): + traversedir(layer) + else: + raise Exception("Error: Directory '%s' does not exist, you probably need to call update" % layer["location"]) + +def write_bblayers_conf(sourcedir): + locations = "" + bblayers = "" + priorities = "" + for layer in sorted(LAYERS.values(), key=lambda l: l["priority"], reverse=True): + if layer["priority"] == -1: + # bitbake is not metadata layer, skip it + continue + + if os.path.isabs(layer["location"]): + topdir = layer["location"] + else: + topdir = "${TOPDIR}" + layer_name = layer["name"].replace('-','_').upper() + + if "collection_path" not in layer: + logger.error("Layer %s doesn't exist at all or local.conf file wasn't found inside" % layer["name"]) + continue + + locations += "%s_LAYER ?= \"%s/%s\"\n" % 
(layer_name, topdir, layer["collection_path"]) + bblayers += " ${%s_LAYER} \\\n" % layer_name + priorities += "BBFILE_PRIORITY_%s_forcevariable = \"%s\"\n" % (layer["collection_name"], layer["priority"]) + + with open(os.path.join(sourcedir, "conf", "bblayers.conf"), 'a') as f: + f.write('\n') + f.write(locations) + f.write('\n') + f.write('BBFILES ?= ""\n') + f.write('BBLAYERS ?= " \\\n') + f.write(bblayers) + f.write('"\n') + f.write(priorities) + +def update_layers(sourcedir): + logger.info('MCF-%s: Updating build directory' % __version__) + layers_sanity = list() + update_location = list() + for layer in sorted(LAYERS.values(), key=lambda l: l["priority"]): + if layer["submission"] and layer["location"] not in update_location: + update_location.append(layer["location"]) + if not os.path.exists(os.path.abspath(layer["location"])): + # downloadrepo + downloadrepo(layer) + else: + # run sanity check on repo + if reposanitycheck(layer) != 0: + layers_sanity.append(layer["location"]) + + # update layers + updaterepo(layer) + + if layers_sanity: + logger.info('Found local changes for repos(s) %s' % layers_sanity) + + printupdatesummary() + +def printupdatesummary (): + logger.info('Repo Update Summary') + logger.info('===================') + found = False + for layer in sorted(LAYERS.values(), key=lambda l: l["priority"]): + if "sanity_uncommitted_clean" in layer and layer["sanity_uncommitted_clean"]: + logger.info(' *) local uncommitted changes were removed because of --clean parameter') + found = True + if "sanity_uncommitted_changes" in layer and layer["sanity_uncommitted_changes"]: + logger.info(' *) local uncommitted changes, use \'git stash pop\' to retrieve') + found = True + if "sanity_dumped_changes" in layer and layer["sanity_dumped_changes"]: + logger.info(' *) local committed changes, patches are backed up in %s/' % layer["repo_patch_dir"]) + found = True + if "sanity_untracked_changes" in layer and layer["sanity_untracked_changes"]: + logger.info(' *) 
local untracked changes') + found = True + if "branch_new" in layer and "branch_current" in layer and layer["branch_new"] != layer["branch_current"]: + logger.info(' *) switched branches from %s to %s' % (layer["branch_current"], layer["branch_new"])) + found = True + if not found: + logger.info('No local changes found') + +def get_remote_branch(newbranch, second_call = False): + remotebranch = None + refbranchlist = echo_check_call("git branch -r") + refbranch = refbranchlist.splitlines() + for ibranch in refbranch: + if ibranch == " %s/%s" % (REMOTE, newbranch): + remotebranch = ibranch.strip() + break + if remotebranch or second_call: + return remotebranch + else: + # try it again after "git remote update" + echo_check_call("git remote update") + return get_remote_branch(newbranch, True) + +def reposanitycheck(layer): + olddir = os.getcwd() + os.chdir(layer["location"]) + + layer["branch_current"] = echo_check_call("git rev-parse --abbrev-ref HEAD").rstrip() + + res = False + + if CLEAN: + if echo_check_call("git status --porcelain -s"): + layer["sanity_uncommitted_clean"] = True + logger.warn('Removing all local changes and untracked files in [%s]' % layer["location"]) + # abort rebase if git pull --rebase from update_layers got stuck on some local commit + try: + echo_check_call("git rebase --abort 2>/dev/null") + except subprocess.CalledProcessError: + # we can ignore this one + pass + + echo_check_call("git stash clear") + echo_check_call("git clean -fdx") + echo_check_call("git reset --hard") + else: + logger.info('Checking for local changes in [%s]' % layer["location"]) + if echo_check_call("git status --porcelain --u=no -s"): + logger.warn('Found local uncommitted changes in [%s]' % layer["location"]) + layer["sanity_uncommitted_changes"] = True + echo_check_call("git stash") + res = True + + if echo_check_call("git status --porcelain -s | grep -v '^?? 
MCF-PATCHES_' || true"): + logger.warn('Found local untracked changes in [%s]' % layer["location"]) + layer["sanity_untracked_changes"] = True + res = True + + try: + remote = echo_check_call('git remote | grep "^%s$"' % REMOTE) + except subprocess.CalledProcessError: + remote = '' + + if not remote: + logger.error("Checkout %s doesn't have the remote '%s'" % (layer["location"], REMOTE)) + raise Exception("Checkout %s doesn't have the remote '%s'" % (layer["location"], REMOTE)) + + try: + urlcurrent = echo_check_call("git config remote.%s.url" % REMOTE) + except subprocess.CalledProcessError: + # git config returns 1 when the option isn't set + urlcurrent = '' + + # there is extra newline at the end + urlcurrent = urlcurrent.strip() + + logger.debug("reposanitycheck(%s) dir %s, branchinfo %s, branchinfonew %s, url %s, urlnew %s" % (layer["name"], layer["location"], layer["branch_current"], layer["branch_new"], layer["url"], urlcurrent)) + + if urlcurrent != layer["url"]: + logger.warn("Changing url for remote '%s' from '%s' to '%s'" % (REMOTE, urlcurrent, layer["url"])) + echo_check_call("git remote set-url %s %s" % (REMOTE, layer["url"])) + # Sync with new remote repo + try: + echo_check_call('git remote update') + except subprocess.CalledProcessError: + raise Exception('Failed to fetch %s repo' % LOCATIONS[layer]) + + newbranch = layer["branch_new"] + if newbranch: + refbranchlist = echo_check_call("git branch") + refbranch = refbranchlist.splitlines() + foundlocalbranch = False + needcheckout = True + for ibranch in refbranch: + if ibranch == " %s" % newbranch: + foundlocalbranch = True + break + if ibranch == "* %s" % newbranch: + foundlocalbranch = True + needcheckout = False + break + + remotebranch = get_remote_branch(newbranch) + + if foundlocalbranch and remotebranch: + if needcheckout: + echo_check_call('git checkout %s' % newbranch) + + head = echo_check_call("git rev-parse --abbrev-ref HEAD").rstrip() + patchdir = './MCF-PATCHES_%s-%s' % 
(head.replace('/','_'), timestamp) + layer["repo_patch_dir"] = "%s/%s" % (layer["location"], patchdir) + cmd ='git format-patch %s..%s -o %s' % (remotebranch,newbranch,patchdir) + rawpatches = echo_check_call(cmd) + patches = rawpatches.splitlines() + num = len(patches) + # logger.info( ' info: number of patches: %s ' % num) + if num > 0: + layer["sanity_dumped_changes"] = True + res = True + else: + # remove empty dir if there weren't any patches created by format-patch + cmd ='rmdir --ignore-fail-on-non-empty %s' % patchdir + echo_check_call(cmd) + + try: + trackingbranch = echo_check_call("git config --get branch.%s.merge" % newbranch) + except subprocess.CalledProcessError: + # git config returns 1 when the option isn't set + trackingbranch = '' + + try: + trackingremote = echo_check_call("git config --get branch.%s.remote" % newbranch) + except subprocess.CalledProcessError: + # git config returns 1 when the option isn't set + trackingremote = '' + + # there is extra newline at the end + trackingbranch = trackingbranch.strip() + trackingremote = trackingremote.strip() + + if not trackingbranch or not trackingremote or trackingbranch.replace('refs/heads',trackingremote) != remotebranch: + logger.warn("checkout %s was tracking '%s/%s' changing it to track '%s'" % (layer["location"], trackingremote, trackingbranch, remotebranch)) + # to ensure we are tracking remote + echo_check_call('git branch --set-upstream %s %s' % (newbranch, remotebranch)) + + elif not foundlocalbranch and remotebranch: + echo_check_call('git checkout -b %s %s' % (newbranch, remotebranch)) + else: + # anything else is failure + raise Exception('Could not find local and remote branches for %s' % newbranch) + else: + raise Exception('Undefined branch name') + + newdir = os.chdir(olddir) + return res + +# Taken from bitbake/lib/bb/fetch2/git.py with modifications for mcf usage +def contains_ref(tag): + cmd = "git log --pretty=oneline -n 1 %s -- 2>/dev/null | wc -l" % (tag) + output = 
echo_check_call(cmd) + if len(output.split()) > 1: + raise Exception("Error: '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output)) + return output.split()[0] != "0" + +def updaterepo(layer): + olddir = os.getcwd() + os.chdir(layer["location"]) + + layer["commit_current"] = echo_check_call("git log --pretty=format:%h -1") + + newcommitid = layer["commit_new"] + currentcommitid = layer["commit_current"] + newbranch = layer["branch_new"] + currentbranch = layer["branch_current"] + + logger.debug("updaterepo(%s) dir %s, id %s, newid %s, branch %s, newbranch %s" % (layer["name"], layer["location"], currentcommitid, newcommitid, currentbranch, newbranch)) + + if newcommitid != currentcommitid: + logger.info('Updating [%s]' % layer["location"]) + if newcommitid: + if newcommitid.startswith('refs/changes/'): + wait_for_git_mirror(newcommitid) + if newbranch and newbranch != currentbranch: + # older git doesn't allow to update reference on currently checked out branch + cmd ='git checkout -B %s FETCH_HEAD' % (newbranch) + elif newbranch: + # we're already on requested branch + cmd ='git reset --hard FETCH_HEAD' + else: + # we don't have any branch preference use detached + cmd ='git checkout FETCH_HEAD' + echo_check_call(cmd) + else: + if not contains_ref(newcommitid): + wait_for_git_mirror(newcommitid) + if newbranch and newbranch != currentbranch: + # older git doesn't allow to update reference on currently checked out branch + cmd ='git checkout -B %s %s' % (newbranch,newcommitid) + elif newbranch: + # we're already on requested branch + cmd ='git reset --hard %s' % newcommitid + else: + # we don't have any branch preference use detached + cmd ='git checkout %s' % newcommitid + echo_check_call(cmd) + else: + if CLEAN: + echo_check_call("git remote update") + echo_check_call('git reset --hard %s/%s' % (REMOTE, newbranch)) + else: + # current branch always tracks a remote one + echo_check_call('git pull %s' % REMOTE) + logger.info('Done 
updating [%s]' % layer["location"]) + else: + logger.info(('[%s] is up-to-date.' % layer["location"])) + + newdir = os.chdir(olddir) + os.getcwd() + +def set_verbosity(options): + if options.silent and options.silent == 1: + set_log_level('WARNING') + elif options.silent and options.silent == 2: + set_log_level('ERROR') + elif options.silent and options.silent >= 3: + set_log_level('CRITICAL') + elif options.verbose and options.verbose == 1: + set_log_level('DEBUG') + elif options.verbose and options.verbose >= 2: + set_log_level('DEBUG') + # but also run every system command with set -x + enable_trace() + else: + set_log_level('INFO') + +def recover_current_mcf_state(srcdir, origoptions): + mcfstatusfile = os.path.join(srcdir, "mcf.status") + if not os.path.exists(mcfstatusfile): + raise Exception("mcf.status does not exist.") + + commandlinereconstructed = list() + commandlinereconstructed.append('ignored-argv-0') + start = False + with open(mcfstatusfile, 'r') as f: + for line in f.readlines(): + line = line.strip() + if not start: + start = line.startswith("exec") + continue + + if start: + if line.startswith('--command'): + # skip --command configure + continue + elif line.startswith('--'): + line = line.rstrip('\\') + line = line.strip(' ') + line = line.replace('\"','') + line = line.replace('\'','') + commandlinereconstructed.append(line) + else: + lines = line.rstrip('\\') + lines = lines.lstrip() + lines = lines.rstrip() + lines = lines.split() + for lline in lines: + commandlinereconstructed.append(lline) + + sys.argv = commandlinereconstructed + options = getopts() + # always use clean/verbose/silent flags from origoptions not mcf.status + options.clean = origoptions.clean + options.verbose = origoptions.verbose + options.silent = origoptions.silent + return options + +def checkmirror(name, url): + if url.startswith('file://'): + pathstr = url[7:] + if not os.path.isdir(pathstr): + logger.warn("%s parameter '%s' points to non-existent directory" % 
(name, url)) + elif not os.listdir(pathstr): + logger.warn("%s parameter '%s' points to empty directory, did you forgot to mount it?" % (name, url)) + elif len(url) <= 7: + logger.error("%s parameter '%s' is incorrect, we expect at least 7 characters for protocol" % (name, url)) + +def sanitycheck(options): + try: + mirror = echo_check_call('git config -l | grep "^url\..*insteadof=github.com/"') + except subprocess.CalledProcessError: + # git config returns 1 when the option isn't set + mirror = '' + pass + if not mirror: + logger.warn('No mirror for github.com was detected, please define mirrors in ~/.gitconfig if some are available') + if options.sstatemirror: + for m in options.sstatemirror: + if not m: + continue + checkmirror('sstatemirror', m) + if options.premirror: + checkmirror('premirror', options.premirror) + +def configure_build(srcdir, options): + files = [ + [os.path.join(srcdir, 'build-templates', 'mcf-status.in'), 'mcf.status' ], + [os.path.join(srcdir, 'build-templates', 'oe-init-build-env.in'), 'oe-init-build-env' ], + [os.path.join(srcdir, 'build-templates', 'Makefile.in'), 'Makefile' ], + [os.path.join(srcdir, 'build-templates', 'bblayers-conf.in'), 'conf/bblayers.conf'], + [os.path.join(srcdir, 'build-templates', 'local-conf.in'), 'conf/local.conf' ], + ] + + replacements = [ + ['@bb_number_threads@', str(options.bb_number_threads)], + ['@parallel_make@', str(options.parallel_make)], + ['@no_network@', '0' if options.network else '1'], + ['@fetchpremirroronly@', '1' if options.fetchpremirroronly else '0'], + ['@generatemirrortarballs@', '1' if options.generatemirrortarballs else '0'], + ['@buildhistory_enabled@', '1' if options.buildhistory else '0'], + ['@buildhistory_class@', 'buildhistory' if options.buildhistory else '' ], + ['@buildhistory_author_assignment@', 'BUILDHISTORY_COMMIT_AUTHOR ?= "%s"' % options.buildhistoryauthor if options.buildhistoryauthor else ''], + ['@premirror_assignment@', 'SOURCE_MIRROR_URL ?= "%s"' % options.premirror 
if options.premirror else ''], + ['@premirror_inherit@', 'INHERIT += "own-mirrors"' if options.premirror else ''], + ['@sstatemirror_assignment@', SSTATE_MIRRORS if options.sstatemirror else ''], + ['@premirror@', options.premirror], + ['@sstatemirror@', ' --sstatemirror='.join(options.sstatemirror) if options.sstatemirror else ''], + ['@buildhistoryauthor@', options.buildhistoryauthor], + ['@buildhistory@', '--%s-buildhistory' % ('enable' if options.buildhistory else 'disable')], + ['@network@', '--%s-network' % ('enable' if options.network else 'disable')], + ['@fetchpremirroronlyoption@', '--%s-fetch-premirror-only' % ('enable' if options.fetchpremirroronly else 'disable')], + ['@generatemirrortarballsoption@', '--%s-generate-mirror-tarballs' % ('enable' if options.generatemirrortarballs else 'disable')], + ['@machine@', options.MACHINE[0]], + ['@machines@', ' '.join(options.MACHINE)], + ['@distro@', DISTRO], + ['@prog@', progname], + ['@srcdir@', srcdir], + ['@abs_srcdir@', abs_srcdir], + ] + + # if icecc is not installed, or version does not match requirements, then disabling icecc is the correct action. 
+ icestate = _icecc_installed() + + icecc_replacements = [ + ['@icecc_disable_enable@', '1' if not icestate or not options.enable_icecc else ''], + ['@icecc_parallel_make@', '%s' % options.icecc_parallel_make], + ['@alternative_icecc_installation@', ('ICECC_PATH ?= "%s"' % options.icecc_location) if options.icecc_location else ''], + ['@icecc_user_package_blacklist@', ('ICECC_USER_PACKAGE_BL ?= "%s"' % ' '.join(options.icecc_user_package_blacklist)) if options.icecc_user_package_blacklist else ''], + ['@icecc_user_class_blacklist@', ('ICECC_USER_CLASS_BL ?= "%s"' % ' '.join(options.icecc_user_class_blacklist)) if options.icecc_user_class_blacklist else ''], + ['@icecc_user_package_whitelist@', ('ICECC_USER_PACKAGE_WL ?= "%s"' % ' '.join(options.icecc_user_package_whitelist)) if options.icecc_user_package_whitelist else ''], + ['@icecc_environment_script@', 'ICECC_ENV_EXEC ?= "%s"' % options.icecc_env_exec if options.icecc_location else ''], + ['@icecc_disable_enable_mcf@', '--%s-icecc' % ('disable' if not icestate or not options.enable_icecc else 'enable')], + ['@alternative_icecc_installation_mcf@', options.icecc_location if options.icecc_location else ''], + ['@icecc_environment_script_mcf@', options.icecc_env_exec if options.icecc_location else ''], + ['@icecc_user_package_blacklist_mcf@', (' '.join(options.icecc_user_package_blacklist)) if options.icecc_user_package_blacklist else ''], + ['@icecc_user_class_blacklist_mcf@', (' '.join(options.icecc_user_class_blacklist)) if options.icecc_user_class_blacklist else ''], + ['@icecc_user_package_whitelist_mcf@', (' '.join(options.icecc_user_package_whitelist)) if options.icecc_user_package_whitelist else ''], + ] + + replacements = replacements + icecc_replacements + + logger.info('MCF-%s: Configuring build directory BUILD' % __version__) + for f in files: + process_file(f, replacements) + parse_collections(srcdir) + write_bblayers_conf(srcdir) + logger.info('MCF-%s: Done configuring build directory BUILD' % 
__version__) + + echo_check_call('/bin/chmod a+x mcf.status', options.verbose) + +if __name__ == '__main__': + # NB. The exec done by mcf.status causes argv[0] to be an absolute pathname + progname = sys.argv[0] + + # Use the same timestamp for everything created by this invocation of mcf + timestamp = strftime("%Y%m%d%H%M%S", gmtime()) + + options = getopts() + + srcdir = os.path.dirname(progname) + abs_srcdir = os.path.abspath(srcdir) + + if options.mcfcommand == 'update': + # recover current mcf state + options = recover_current_mcf_state(srcdir, options) + + set_verbosity(options) + + if options.clean: + enable_clean() + + read_weboslayers(srcdir) + for M in options.MACHINE: + if M not in SUPPORTED_MACHINES: + logger.error("MACHINE argument '%s' isn't supported (does not appear in Machines in weboslayers.py '%s')" % (M, SUPPORTED_MACHINES)) + sys.exit(1) + + if options.mcfcommand != 'configure': + update_layers(srcdir) + + configure_build(srcdir, options) + + sanitycheck(options) + logger.info('Done.') diff --git a/scripts/bitbake-bash_completion b/scripts/bitbake-bash_completion new file mode 100644 index 00000000..c13cc61c --- /dev/null +++ b/scripts/bitbake-bash_completion @@ -0,0 +1,59 @@ +# +# "bitbake autocompletion description" +# +# LICENSE: GNU GPLv2 or any later version +# CREATED: Filippo Basso (12-DEC-2007) +# ORIGIN-URL: http://lists.linuxtogo.org/pipermail/openembedded-devel/2007-December/003766.html +# MODIFIED: +# +# Use: +# - simply put it into your /etc/bash_completion.d/ directory... 
+# For help enhancing this script, see also: +# - http://www.debian-administration.org/articles/317 +# - http://www.gnu.org/software/bash/manual/bashref.html#SEC112 +# + + +_bitbake() +{ + local cur conns prev options options1 i j A recipefile + COMPREPLY=() + + cur="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[COMP_CWORD-1]}" + options="--bind= --buildfile= --clear-stamp= --cmd= --continue --debug --dry-run --dump-signatures --environment --force --graphviz --help --ignore-deps= --log-domains= --no-setscene --parse-only --postread= --profile --read= --revisions-changed --server-only --servertype= --show-versions --tryaltconfigs --ui= --verbose --version" + options1="-B -C -D -I -P -R -S -a -b -c -e -f -g -h -k -l -n -p -r -s -t -u -v" + commands=`(cd ${TOPDIR}; make _display_conveniences)` # TOPDIR is set in oe-init-build-env + + # ------- Options completion ------- + if [[ ${prev} == "-c" ]] + then + COMPREPLY=( $(compgen -W "${commands}" -- ${cur}) ) + return 0 + fi + + if [[ ${cur} == -* ]] + then + if [[ ${cur} == --* ]] + then + COMPREPLY=( $(compgen -W "${options}" -- ${cur}) ) + return 0 + else + COMPREPLY=( $(compgen -W "${options1}" -- ${cur}) ) + return 0 + fi + fi + + # ------- Recipe list autocompletion ------- + # This doesn't work when overriding META_*_LAYER settings + # in conf/bblayers.conf from webos-local.conf + # TOPDIR is set in oe-init-build-env + + metapaths=`grep '^META_.*LAYER' ${TOPDIR}/conf/bblayers.conf | cut -f 2 -d '"' ` + metapaths=`eval echo $metapaths|tr " " ","` + metapaths=`eval echo {${metapaths}}/recipes-*/*/${cur}*.bb` + conns=`ls -fb $metapaths 2> /dev/null | sed -e 's!/[^ ]*/!!g' -e 's!_[^ ]*!!g' -e 's!\.bb!!g'` + COMPREPLY=( $(compgen -W "${conns}" -- ${cur}) ) + return 0 +} +complete -F _bitbake bitbake diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 00000000..aa319052 --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,695 @@ +#!/bin/bash + +# Copyright (c) 2013-2017 LG Electronics, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Uncomment line below for debugging +#set -x + +# Some constants +SCRIPT_VERSION="6.10.2c" +SCRIPT_NAME=`basename $0` +AUTHORITATIVE_OFFICIAL_BUILD_SITE="svl" + +BUILD_REPO="webosose/build-webos" +BUILD_LAYERS=("webosose/meta-webosose") + +# Create BOM files, by default disabled +CREATE_BOM= + +# Dump signatures, by default disabled +SIGNATURES= + +# Build site passed to script from outside (Replaces detecting it from JENKINS_URL) +BUILD_SITE= +# Build job passed to script from outside (Replaces detecting it from JOB_NAME) +BUILD_JOB= +# Branch where to push buildhistory, for repositories on gerrit it should start with refs/heads (Replaces detecting it from JOB_NAME and JENKINS_URL) +BUILD_BUILDHISTORY_BRANCH= +# Name of git remote used in cloned repos +BUILD_REMOTE=origin + +# We assume that script is inside scripts subfolder of build project +# and form paths based on that +CALLDIR=${PWD} + +BUILD_TIMESTAMP_START=`date -u +%s` +BUILD_TIMESTAMP_OLD=$BUILD_TIMESTAMP_START + +TIME_STR="TIME: %e %S %U %P %c %w %R %F %M %x %C" + +# We need absolute path for ARTIFACTS +pushd `dirname $0` > /dev/null +SCRIPTDIR=`pwd -P` +popd > /dev/null + +# Now let's ensure that: +pushd ${SCRIPTDIR} > /dev/null +if [ ! -d "../scripts" ] ; then + echo "Make sure that ${SCRIPT_NAME} is in scripts folder of project" + exit 2 +fi +popd > /dev/null + +cd "${SCRIPTDIR}/.." 
+ +BUILD_TOPDIR=`echo "$SCRIPTDIR" | sed 's#/scripts/*##g'` +ARTIFACTS="${BUILD_TOPDIR}/BUILD-ARTIFACTS" +mkdir -p "${ARTIFACTS}" +BUILD_TIME_LOG=${BUILD_TOPDIR}/time.txt + +function print_timestamp { + BUILD_TIMESTAMP=`date -u +%s` + BUILD_TIMESTAMPH=`date -u +%Y%m%dT%TZ` + + local BUILD_TIMEDIFF=`expr ${BUILD_TIMESTAMP} - ${BUILD_TIMESTAMP_OLD}` + local BUILD_TIMEDIFF_START=`expr ${BUILD_TIMESTAMP} - ${BUILD_TIMESTAMP_START}` + BUILD_TIMESTAMP_OLD=${BUILD_TIMESTAMP} + printf "TIME: ${SCRIPT_NAME}-${SCRIPT_VERSION} $1: ${BUILD_TIMESTAMP}, +${BUILD_TIMEDIFF}, +${BUILD_TIMEDIFF_START}, ${BUILD_TIMESTAMPH}\n" | tee -a ${BUILD_TIME_LOG} +} + +print_timestamp "start" + +declare -i RESULT=0 + +function showusage { + echo "Usage: ${SCRIPT_NAME} [OPTION...]" + cat <&2 + if echo ${ref} | grep -q "^refs/changes/"; then + git fetch ${BUILD_REMOTE} ${ref} >&2 && return + else + local cmd="git log --pretty=oneline -n 1 ${ref} --" + local contains=`${cmd} 2>/dev/null | wc -l` + if [ "${contains}" -gt 1 ] ; then + echo "ERROR: ${SCRIPT_NAME}-${SCRIPT_VERSION} '${cmd}' gave output with more then 1 line unexpectedly in `pwd`." >&2 + exit 1 + elif [ "${contains}" -ne 1 ] ; then + git remote update >&2 + git fetch ${BUILD_REMOTE} --tags >&2 + else + echo "INFO: ${SCRIPT_NAME}-${SCRIPT_VERSION} ${ref} is now available in `pwd`." >&2 + return + fi + fi + sleep 30 # wait 30s for git-mirror to get ${ref} + done + echo "ERROR: ${SCRIPT_NAME}-${SCRIPT_VERSION} Cannot checkout ${ref} in `pwd`" >&2 + exit 1 +} + +function check_project { +# Check out appropriate refspec for layer verification based on GERRIT_PROJECT +# or master if we assume other layers stable + layer=`basename $1` + if [ -d "${layer}" ] ; then + pushd "${layer}" >/dev/null + if [ "${GERRIT_PROJECT}" = "$1" ] ; then + echo "NOTE: Checking out layer '${layer}' in gerrit refspec '${GERRIT_REFSPEC}'" >&2 + wait_for_git_mirror ${GERRIT_REFSPEC} && git checkout FETCH_HEAD >&2 + if [ $? 
-ne 0 ] ; then + echo "ERROR: Failed to checkout layer '${layer}' at gerrit refspec '${GERRIT_REFSPEC}'!" >&2 + exit 1 + fi + else + current_branch=`git branch --list|grep ^*\ |awk '{print $2}'` + echo "NOTE: Run 'git remote update && git reset --hard ${BUILD_REMOTE}/${current_branch}' in layer '${layer}'" >&2 + echo "NOTE: Current branch - ${current_branch}" >&2 + git remote update >&2 && git reset --hard ${BUILD_REMOTE}/${current_branch} >&2 + if [ $? -ne 0 ] ; then + echo "ERROR: Failed to checkout layer '${layer}' at ref '${ref}'!" >&2 + exit 1 + fi + fi + popd >/dev/null + fi +} + +function check_project_vars { + # Check out appropriate refspec passed in _commit + # when requested by use__commit + layer=`basename $1` + use=$(eval echo \$"use_${layer//-/_}_commit") + ref=$(eval echo "\$${layer//-/_}_commit") + if [ "$use" = "true" ]; then + echo "NOTE: Checking out layer '${layer}' in ref '${ref}'" >&2 + ldesc=" ${layer}:${ref}" + if [ -d "${layer}" ] ; then + pushd "${layer}" >/dev/null + if echo ${ref} | grep -q '^refs/changes/'; then + wait_for_git_mirror ${ref} && git checkout FETCH_HEAD >&2 + if [ $? -ne 0 ] ; then + echo "ERROR: Failed to checkout layer '${layer}' at ref '${ref}'!" >&2 + exit 1 + fi + else + # Check if the ref is branch name without remote name + if git branch -a | grep -q "${BUILD_REMOTE}/@${ref}$"; then + ref=${BUILD_REMOTE}/@${ref} + echo "NOTE: Checking out layer '${layer}' as remote branch '${ref}'" >&2 + elif git branch -a | grep -q "${BUILD_REMOTE}/${ref}$"; then + ref=${BUILD_REMOTE}/${ref} + echo "NOTE: Checking out layer '${layer}' as remote branch '${ref}'" >&2 + fi + git remote update >&2 && git fetch ${BUILD_REMOTE} --tags >&2 && wait_for_git_mirror ${ref} && git reset --hard ${ref} >&2; + if [ $? -ne 0 ] ; then + echo "ERROR: Failed to checkout layer '${layer}' at ref '${ref}'!" >&2 + exit 1 + fi + fi + popd >/dev/null + else + echo "ERROR: Layer ${layer} does not exist!" 
>&2 + fi + fi + echo "$ldesc" +} + +function generate_webos_bom { + MACHINE=$1 + I=$2 + F=$3 + + rm -f webos-bom.json + /usr/bin/time -f "$TIME_STR" bitbake -c write_bom ${I} 2>&1 | tee /dev/stderr | grep '^TIME:' >> ${BUILD_TIME_LOG} + [ -d ${ARTIFACTS}/${MACHINE}/${I} ] || mkdir -p ${ARTIFACTS}/${MACHINE}/${I} + sort webos-bom.json > ${ARTIFACTS}/${MACHINE}/${I}/${F} + rm -f webos-bom.json +} + +function filter_images { + FILTERED_IMAGES="" + # remove images which aren't available for some MACHINEs + # no restriction in webos-pro + FILTERED_IMAGES="${IMAGES}" + if [ -n "${IMAGES}" -a -z "${FILTERED_IMAGES}" ] ; then + echo "ERROR: All images were filtered for MACHINE: '${MACHINE}', IMAGES: '${IMAGES}'" + fi +} + +function call_bitbake { + filter_images + /usr/bin/time -f "$TIME_STR" bitbake ${BBFLAGS} ${FILTERED_IMAGES} ${TARGETS} 2>&1 | tee /dev/stderr | grep '^TIME:' >> ${BUILD_TIME_LOG} + + # Be aware that non-zero exit code from bitbake doesn't always mean that images weren't created. + # All images were created if it shows "all succeeded" in "Tasks Summary": + # NOTE: Tasks Summary: Attempted 5450 tasks of which 5205 didn't need to be rerun and all succeeded. + + # Sometimes it's followed by: + # Summary: There were 2 ERROR messages shown, returning a non-zero exit code. + # the ERRORs can be from failed setscene tasks or from QA checks, but weren't fatal for build.
+ + # Collect exit codes to return them from this script (Use PIPESTATUS to read return code from bitbake, not from added tee) + RESULT+=${PIPESTATUS[0]} +} + +function add_md5sums_and_buildhistory_artifacts { + local I + for I in ${FILTERED_IMAGES}; do + local found_image=false + # Add .md5 files for image files, if they are missing or older than image file + local IMG_FILE + for IMG_FILE in ${ARTIFACTS}/${MACHINE}/${I}/*.vmdk* \ + ${ARTIFACTS}/${MACHINE}/${I}/*.tar.gz \ + ${ARTIFACTS}/${MACHINE}/${I}/*.tar.bz2 \ + ${ARTIFACTS}/${MACHINE}/${I}/*.rpi-sdimg \ + ${ARTIFACTS}/${MACHINE}/${I}/*.zip \ + ${ARTIFACTS}/${MACHINE}/*.fastboot \ + ${ARTIFACTS}/${MACHINE}/${I}/*.fastboot \ + ${ARTIFACTS}/${MACHINE}/${I}/*.epk \ + ${ARTIFACTS}/${MACHINE}/${I}/*.sh \ + ${ARTIFACTS}/${SDKMACHINE}/*.sh; do + if echo ${IMG_FILE} | grep -q "\.md5$"; then + continue + fi + if [ -e ${IMG_FILE} -a ! -L ${IMG_FILE} ] ; then + found_image=true + if [ ! -e ${IMG_FILE}.md5 -o ${IMG_FILE}.md5 -ot ${IMG_FILE} ] ; then + echo MD5: ${IMG_FILE} + md5sum ${IMG_FILE} | sed 's# .*/# #g' > ${IMG_FILE}.md5 + fi + fi + done + + # copy few interesting buildhistory reports only if the image was really created + # (otherwise old report from previous build checked out from buildhistory repo could be used) + if $found_image ; then + add_buildhistory_artifacts + fi + done +} + +function add_buildhistory_artifacts { + # XXX Might there be other subdirectories under buildhistory/sdk that weren't created by this build? 
+ # Some MACHINEs like raspberrypi3-64 now contain dash which gets converted to underscore + # for MACHINE_ARCH, which is also the directory in buildhistory + BHMACHINE=`echo ${MACHINE} | sed 's/-/_/g'` + if ls buildhistory/sdk/*/${I} >/dev/null 2>/dev/null; then + if [ -n "${SDKMACHINE}" ] ; then + for d in buildhistory/sdk/${I}-${SDKMACHINE}-*; do + [ -d ${ARTIFACTS}/${SDKMACHINE}/${MACHINE} ] || mkdir -p ${ARTIFACTS}/${SDKMACHINE}/${MACHINE}/ + cp -a $d/${I} ${ARTIFACTS}/${SDKMACHINE}/${MACHINE}/ + done + else + for d in buildhistory/sdk/*; do + mkdir -p ${ARTIFACTS}/${MACHINE}/ + cp -a $d/${I} ${ARTIFACTS}/${MACHINE}/ + done + fi + else + if [ -f buildhistory/images/${BHMACHINE}/glibc/${I}/build-id.txt ]; then + ln -vn buildhistory/images/${BHMACHINE}/glibc/${I}/build-id.txt ${ARTIFACTS}/${MACHINE}/${I}/build-id.txt + else + ln -vn buildhistory/images/${BHMACHINE}/glibc/${I}/build-id ${ARTIFACTS}/${MACHINE}/${I}/build-id.txt + fi + if [ -z "$FIRST_IMAGE" -a -e ${ARTIFACTS}/${MACHINE}/${I}/build-id.txt ] ; then + # store build-id.txt from first IMAGE and first MACHINE as representant of whole build for InfoBadge + # instead of requiring jenkins job to hardcode MACHINE/IMAGE name in: + # manager.addInfoBadge("${manager.build.getWorkspace().child('buildhistory/images/qemux86/glibc/webos-image/build-id.txt').readToString()}") + # we should be able to use: + # manager.addInfoBadge("${manager.build.getWorkspace().child('BUILD-ARTIFACTS/build-id.txt').readToString()}") + # in all builds (making BUILD_IMAGES/BUILD_MACHINE changes less error-prone) + FIRST_IMAGE="${MACHINE}/${I}" + ln -vnf ${ARTIFACTS}/${BHMACHINE}/${I}/build-id.txt ${ARTIFACTS}/build-id.txt + fi + ln -vn buildhistory/images/${BHMACHINE}/glibc/${I}/image-info.txt ${ARTIFACTS}/${MACHINE}/${I}/image-info.txt + ln -vn buildhistory/images/${BHMACHINE}/glibc/${I}/files-in-image.txt ${ARTIFACTS}/${MACHINE}/${I}/files-in-image.txt + ln -vn buildhistory/images/${BHMACHINE}/glibc/${I}/installed-packages.txt 
${ARTIFACTS}/${MACHINE}/${I}/installed-packages.txt + ln -vn buildhistory/images/${BHMACHINE}/glibc/${I}/installed-package-sizes.txt ${ARTIFACTS}/${MACHINE}/${I}/installed-package-sizes.txt + if [ -e buildhistory/images/${BHMACHINE}/glibc/${I}/installed-package-file-sizes.txt ] ; then + ln -vn buildhistory/images/${BHMACHINE}/glibc/${I}/installed-package-file-sizes.txt ${ARTIFACTS}/${MACHINE}/${I}/installed-package-file-sizes.txt + fi + fi +} + +function move_kernel_image_and_add_symlinks { + # include .fastboot kernel image if present; XXX we are assuming that their + # basenames start with "linux" + if ls BUILD/deploy/images/${MACHINE}/linux*.fastboot >/dev/null 2>/dev/null; then + [ -d ${ARTIFACTS}/${MACHINE}/kernel/ ] || mkdir -p ${ARTIFACTS}/${MACHINE}/kernel/ + ln -vn BUILD/deploy/images/${MACHINE}/linux*.fastboot ${ARTIFACTS}/${MACHINE}/kernel/ + # create symlinks in all image directories + local I + for I in ${FILTERED_IMAGES}; do + if [ -d ${ARTIFACTS}/${MACHINE}/${I} ] ; then + pushd ${ARTIFACTS}/${MACHINE}/${I} >/dev/null + local f + for f in ../kernel/linux*.fastboot; do + ln -snf $f . 
+ done + popd >/dev/null + fi + done + fi +} + +function move_artifacts { + for I in ${FILTERED_IMAGES}; do + mkdir -p "${ARTIFACTS}/${MACHINE}/${I}" || true + # we store only tar.gz, vmdk.zip and .epk images + # and we don't publish kernel images anymore + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.vmdk >/dev/null 2>/dev/null; then + if type zip >/dev/null 2>/dev/null; then + # zip vmdk images if they exists + find BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.vmdk -exec zip -j {}.zip {} \; + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.vmdk.zip ${ARTIFACTS}/${MACHINE}/${I}/ + else + # report failure and publish vmdk + echo "ERROR: ${SCRIPT_NAME}-${SCRIPT_VERSION} zip utility isn't installed on the build server" >&2 + RESULT+=1 + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.vmdk ${ARTIFACTS}/${MACHINE}/${I}/ + fi + # copy webosvbox if we've built vmdk image + if [ -e meta-webosose/meta-webos/scripts/webosvbox -a ! -e ${ARTIFACTS}/${MACHINE}/webosvbox ] ; then + ln -vn meta-webosose/meta-webos/scripts/webosvbox ${ARTIFACTS}/${MACHINE} + fi + # copy few more files for creating different vmdk files with the same rootfs + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.rootfs.ext3 >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.rootfs.ext3 ${ARTIFACTS}/${MACHINE}/${I}/ + fi + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*-dbg.tar.gz >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*-dbg.tar.gz ${ARTIFACTS}/${MACHINE}/${I}/ + fi + if [ -e BUILD/sysroots/${MACHINE}/usr/lib/syslinux/mbr.bin ] ; then + ln -vn BUILD/sysroots/${MACHINE}/usr/lib/syslinux/mbr.bin ${ARTIFACTS}/${MACHINE}/${I}/ + fi + # this won't work in jobs which inherit rm_work, but until we change the image build to stage them use WORKDIR paths + if ls BUILD/work/${MACHINE}*/${I}/*/*/hdd/boot/ldlinux.sys >/dev/null 2>/dev/null; then + ln -vn 
BUILD/work/${MACHINE}*/${I}/*/*/hdd/boot/ldlinux.sys ${ARTIFACTS}/${MACHINE}/${I}/ + else + echo "INFO: ldlinux.sys doesn't exist, probably using rm_work" + fi + if ls BUILD/work/${MACHINE}*/${I}/*/*/hdd/boot/syslinux.cfg >/dev/null 2>/dev/null; then + ln -vn BUILD/work/${MACHINE}*/${I}/*/*/hdd/boot/syslinux.cfg ${ARTIFACTS}/${MACHINE}/${I}/ + else + echo "INFO: syslinux.cfg doesn't exist, probably using rm_work" + fi + if ls BUILD/work/${MACHINE}*/${I}/*/*/hdd/boot/vmlinuz >/dev/null 2>/dev/null; then + ln -vn BUILD/work/${MACHINE}*/${I}/*/*/hdd/boot/vmlinuz ${ARTIFACTS}/${MACHINE}/${I}/ + else + echo "INFO: vmlinuz doesn't exist, probably using rm_work" + fi + elif ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.rpi-sdimg >/dev/null 2>/dev/null; then + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.rpi-sdimg >/dev/null 2>/dev/null; then + gzip -f BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.rpi-sdimg + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.rpi-sdimg.gz ${ARTIFACTS}/${MACHINE}/${I}/ + fi + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.gz >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.gz ${ARTIFACTS}/${MACHINE}/${I}/ + fi + elif ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.gz >/dev/null 2>/dev/null \ + || ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.fastboot >/dev/null 2>/dev/null \ + || ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.epk >/dev/null 2>/dev/null; then + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*-dbg.tar.gz >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*-dbg.tar.gz ${ARTIFACTS}/${MACHINE}/${I}/ + fi + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.gz >/dev/null 2>/dev/null; then + for TARBALL in BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.gz; do + if echo $TARBALL | grep -q ".-dbg.*.tar.gz"; then + continue + fi + ln -vn ${TARBALL} ${ARTIFACTS}/${MACHINE}/${I}/ + done 
+ fi + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.fastboot >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.fastboot ${ARTIFACTS}/${MACHINE}/${I}/ + fi + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.epk >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.epk ${ARTIFACTS}/${MACHINE}/${I}/ + fi + elif ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.bz2 >/dev/null 2>/dev/null \ + || ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.zip >/dev/null 2>/dev/null; then + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.bz2 >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.tar.bz2 ${ARTIFACTS}/${MACHINE}/${I}/ + fi + if ls BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.zip >/dev/null 2>/dev/null; then + ln -vn BUILD/deploy/images/${MACHINE}/${I}-${MACHINE}-*.zip ${ARTIFACTS}/${MACHINE}/${I}/ + fi + elif ls BUILD/deploy/sdk/${I}-*.sh >/dev/null 2>/dev/null; then + if [ -n "${SDKMACHINE}" ] ; then + [ -d ${ARTIFACTS}/${SDKMACHINE} ] || mkdir -p ${ARTIFACTS}/${SDKMACHINE} + ln -vn BUILD/deploy/sdk/${I}-*.sh ${ARTIFACTS}/${SDKMACHINE}/ + else + ln -vn BUILD/deploy/sdk/${I}-*.sh ${ARTIFACTS}/${MACHINE}/${I}/ + fi + else + echo "WARN: No ${I} images with recognized IMAGE_FSTYPES found to copy as build artifacts" + fi + + # delete possibly empty directories + rmdir --ignore-fail-on-non-empty ${ARTIFACTS}/${MACHINE}/${I} ${ARTIFACTS}/${MACHINE} + done + + if ls BUILD/deploy/images/${MACHINE}/partitiongroup-*.tar.bz2 >/dev/null 2>/dev/null; then + mkdir -p ${ARTIFACTS}/${MACHINE}/partitiongroups/ + # don't copy the symlinks without WEBOS_VERSION suffix + find BUILD/deploy/images/${MACHINE}/ -name partitiongroup-\*.tar.bz2 -type f -exec cp -a {} ${ARTIFACTS}/${MACHINE}/partitiongroups/ \; + fi + + if [ "${BUILD_ENABLE_RSYNC_IPK}" = "Y" ] ; then + if ls BUILD/deploy/ipk/* >/dev/null 2>/dev/null; then + cp -ra BUILD/deploy/ipk
${ARTIFACTS} + else + echo "WARN: No ipk files to copy to build artifacts" + fi + fi + + move_kernel_image_and_add_symlinks + add_md5sums_and_buildhistory_artifacts +} + +TEMP=`getopt -o I:T:M:S:j:J:B:u:bshV --long images:,targets:,machines:,scp-url:,site:,jenkins:,job:,buildhistory-ref:,bom,signatures,help,version \ + -n $(basename $0) -- "$@"` + +if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 2 ; fi + +# Note the quotes around `$TEMP': they are essential! +eval set -- "$TEMP" + +while true ; do + case $1 in + -I|--images) IMAGES="$2" ; shift 2 ;; + -T|--targets) TARGETS="$2" ; shift 2 ;; + -M|--machines) BMACHINES="$2" ; shift 2 ;; + -S|--site) BUILD_SITE="$2" ; shift 2 ;; + -j|--jenkins) BUILD_JENKINS_SERVER="$2" ; shift 2 ;; + -J|--job) BUILD_JOB="$2" ; shift 2 ;; + -B|--buildhistory-ref) BUILD_BUILDHISTORY_PUSH_REF="$2" ; shift 2 ;; + -u|--scp-url) URL="$2" ; shift 2 ;; + -b|--bom) CREATE_BOM="Y" ; shift ;; + -s|--signatures) SIGNATURES="Y" ; shift ;; + -h|--help) showusage ; shift ;; + -V|--version) echo ${SCRIPT_NAME} ${SCRIPT_VERSION}; exit ;; + --) shift ; break ;; + *) echo "${SCRIPT_NAME} Unrecognized option '$1'"; + showusage ;; + esac +done + +# Has mcf been run and generated a makefile? +if [ ! -f "Makefile" ] ; then + echo "Make sure that mcf has been run and Makefile has been generated" + exit 2 +fi + +if [ -z "${BUILD_SITE}" -o "${BUILD_JENKINS_SERVER}" = "anaconda" ]; then + # Let the distro determine the policy on setting WEBOS_DISTRO_BUILD_ID when builds + # are unofficial + unset WEBOS_DISTRO_BUILD_ID +else + # If this is an official build, no BUILD_JOB prefix appears in + # WEBOS_DISTRO_BUILD_ID regardless of the build site. + if [ "${BUILD_JOB}" = "official" ]; then + if [ "${BUILD_SITE}" = "${AUTHORITATIVE_OFFICIAL_BUILD_SITE}" ]; then + BUILD_SITE="" + fi + BUILD_JOB="" + else + # BUILD_JOB can not contain any hyphens + BUILD_JOB="${BUILD_JOB//-/}" + fi + + # Append the separators to site and build-type. 
+ # + # Use intermediate variables so that the remainder of the script need not concern + # itself with the separators, which are purely related to formatting the build id. + idsite="${BUILD_SITE}" + idtype="${BUILD_JOB}" + + if [ -n "$idsite" ]; then + idsite="${idsite}-" + fi + + if [ -n "$idtype" ]; then + idtype="${idtype}." + fi + + # BUILD_NUMBER should be set by the Jenkins executor + if [ -z "${BUILD_NUMBER}" ] ; then + echo "BUILD_SITE is set, but BUILD_NUMBER isn't" + exit 1 + fi + + # Format WEBOS_DISTRO_BUILD_ID as .- + export WEBOS_DISTRO_BUILD_ID=${idtype}${idsite}${BUILD_NUMBER} +fi + +# Generate BOM files with metadata checked out by mcf (pinned versions) +if [ -n "${CREATE_BOM}" -a -n "${BMACHINES}" ]; then + print_timestamp "before first bom" + if [ "${BUILD_JOB}" = "verf" -o "${BUILD_JOB}" = "mlverf" -o "${BUILD_JOB}" = "integ" -o "${BUILD_JOB}" = "engr" -o "${BUILD_JOB}" = "clean" ] ; then + # don't use -before suffix for official builds, because they don't need -after and .diff because + # there is no logic for using different revisions than weboslayers.py + BOM_FILE_SUFFIX="-before" + fi + . 
oe-init-build-env + for MACHINE in ${BMACHINES}; do + filter_images + for I in ${FILTERED_IMAGES} ${TARGETS}; do + generate_webos_bom "${MACHINE}" "${I}" "webos-bom${BOM_FILE_SUFFIX}.json" + done + done +fi + +print_timestamp "before verf/engr/clean logic" + +if [ "${BUILD_JOB}" = "verf" -o "${BUILD_JOB}" = "mlverf" -o "${BUILD_JOB}" = "integ" -o "${BUILD_JOB}" = "engr" ] ; then + if [ "$GERRIT_PROJECT" != "${BUILD_REPO}" ] ; then + set -e # checkout issues are critical for verification and engineering builds + for project in "${BUILD_LAYERS[@]}" ; do + check_project ${project} + done + set +e + fi + # use -k for verf and engr builds, see [ES-85] + BBFLAGS="${BBFLAGS} -k" +fi + +if [ "${BUILD_JOB}" = "clean" ] ; then + set -e # checkout issues are critical for clean build + desc="[DESC]" + for project in "${BUILD_LAYERS[@]}" ; do + desc="${desc}`check_project_vars ${project}`" + done + # This is picked by regexp in jenkins config as description of the build + echo $desc + set +e +fi + +# Generate BOM files again, this time with metadata possibly different for engineering and verification builds +if [ -n "${CREATE_BOM}" -a -n "${BMACHINES}" ]; then + if [ "${BUILD_JOB}" = "verf" -o "${BUILD_JOB}" = "mlverf" -o "${BUILD_JOB}" = "integ" -o "${BUILD_JOB}" = "engr" -o "${BUILD_JOB}" = "clean" ] ; then + print_timestamp "before 2nd bom" + . oe-init-build-env + for MACHINE in ${BMACHINES}; do + filter_images + for I in ${FILTERED_IMAGES} ${TARGETS}; do + generate_webos_bom "${MACHINE}" "${I}" "webos-bom-after.json" + diff ${ARTIFACTS}/${MACHINE}/${I}/webos-bom-before.json \ + ${ARTIFACTS}/${MACHINE}/${I}/webos-bom-after.json \ + > ${ARTIFACTS}/${MACHINE}/${I}/webos-bom-diff.txt + done + done + fi +fi + +print_timestamp "before signatures" + +if [ -n "${SIGNATURES}" -a -n "${BMACHINES}" ]; then + . 
oe-init-build-env + for MACHINE in ${BMACHINES}; do + mkdir -p "${ARTIFACTS}/${MACHINE}" || true + filter_images + # normally this is executed for all MACHINEs together, but we're using MACHINE-specific FILTERED_IMAGES + oe-core/scripts/sstate-diff-machines.sh --tmpdir=BUILD --targets="${FILTERED_IMAGES} ${TARGETS}" --machines="${MACHINE}" + tar cjf ${ARTIFACTS}/${MACHINE}/sstate-diff.tar.bz2 BUILD/sstate-diff/*/${MACHINE} --remove-files + done +fi + +# If there is git checkout in buildhistory dir and we have BUILD_BUILDHISTORY_PUSH_REF +# add or replace push repo in webos-local +# Write it this way so that BUILDHISTORY_PUSH_REPO is kept in the same place in webos-local.conf +if [ -d "buildhistory/.git" -a -n "${BUILD_BUILDHISTORY_PUSH_REF}" ] ; then + if [ -f webos-local.conf ] && grep -q ^BUILDHISTORY_PUSH_REPO webos-local.conf ; then + sed "s#^BUILDHISTORY_PUSH_REPO.*#BUILDHISTORY_PUSH_REPO ?= \"${BUILD_REMOTE} HEAD:${BUILD_BUILDHISTORY_PUSH_REF}\"#g" -i webos-local.conf + else + echo "BUILDHISTORY_PUSH_REPO ?= \"${BUILD_REMOTE} HEAD:${BUILD_BUILDHISTORY_PUSH_REF}\"" >> webos-local.conf + fi + echo "INFO: buildhistory will be pushed to '${BUILD_BUILDHISTORY_PUSH_REF}'" + pushd buildhistory > /dev/null + git remote -v + git branch + popd > /dev/null + +else + [ -f webos-local.conf ] && sed "/^BUILDHISTORY_PUSH_REPO.*/d" -i webos-local.conf + echo "INFO: buildhistory won't be pushed because buildhistory directory isn't git repo or BUILD_BUILDHISTORY_PUSH_REF wasn't set" +fi + +print_timestamp "before main '${JOB_NAME}' build" + +FIRST_IMAGE= +if [ -z "${BMACHINES}" ]; then + echo "ERROR: calling build.sh without -M parameter" +else + . 
oe-init-build-env + if [ -n "${BUILD_SDKMACHINES}" ] && echo "${IMAGES}" | grep -q "^[^- ]\+-bdk$" ; then + # if there is only one image ending with "-bdk" and BUILD_SDKMACHINES is defined + # then build it for every SDKMACHINE before moving the artifacts + for SDKMACHINE in ${BUILD_SDKMACHINES}; do + export SDKMACHINE + for MACHINE in ${BMACHINES}; do + call_bitbake + move_artifacts + done + done + else + for MACHINE in ${BMACHINES}; do + call_bitbake + move_artifacts + done + fi + + grep -R "Elapsed time" BUILD/buildstats | sed 's/^.*\/\(.*\): Elapsed time: \(.*\)$/\2 \1/g' | sort -n | tail -n 20 | tee -a ${ARTIFACTS}/top20buildstats.txt + tar cjf ${ARTIFACTS}/buildstats.tar.bz2 BUILD/buildstats + if [ -e BUILD/qa.log ]; then + ln -vn BUILD/qa.log ${ARTIFACTS} || true + # show them in console log so they are easier to spot (without downloading qa.log from artifacts + echo "WARN: Following QA issues were found:" + cat BUILD/qa.log + else + echo "NOTE: No QA issues were found." + fi + if [ -d BUILD/deploy/sources ] ; then + # exclude diff.gz files, because with old archiver they contain whole source (nothing creates .orig directory) + # see http://lists.openembedded.org/pipermail/openembedded-core/2013-December/087729.html + tar czf ${ARTIFACTS}/sources.tar.gz BUILD/deploy/sources --exclude \*.diff.gz + fi +fi + +print_timestamp "before baselines" + +# Don't do these for unofficial builds +if [ -n "${WEBOS_DISTRO_BUILD_ID}" -a "${RESULT}" -eq 0 ]; then + if [ ! -f latest_project_baselines.txt ]; then + # create dummy, especially useful for verification builds (diff against ${BUILD_REMOTE}/master) + echo ". 
${BUILD_REMOTE}/master" > latest_project_baselines.txt + for project in "${BUILD_LAYERS[@]}" ; do + layer=`basename ${project}` + if [ -d "${layer}" ] ; then + echo "${layer} ${BUILD_REMOTE}/master" >> latest_project_baselines.txt + fi + done + fi + + command \ + meta-webosose/meta-webos/scripts/build-changes/update_build_changes.sh \ + "${BUILD_NUMBER}" \ + "${URL}" 2>&1 || printf "\nChangelog generation failed or script not found.\nPlease check lines above for errors\n" + ln -vn build_changes.log ${ARTIFACTS} || true +fi + +print_timestamp "stop" + +cd "${CALLDIR}" + +# only the result from bitbake/make is important +exit ${RESULT} + +# vim: ts=2 sts=2 sw=2 et diff --git a/scripts/prerequisites.sh b/scripts/prerequisites.sh new file mode 100755 index 00000000..15628ddb --- /dev/null +++ b/scripts/prerequisites.sh @@ -0,0 +1,161 @@ +#!/bin/bash -e + +# Copyright (c) 2008-2013 LG Electronics, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This has only been tested on Ubuntu-12.04 and Ubuntu-14.04 amd64. + +check_sanity=true +usage="$0 [--help|-h] [--version|-V] [--force|-f]" +version="2.2.1" +statusfile="/etc/webos.prerequisites" + +for i ; do + case "$i" in + --help|-h) echo ${usage}; exit 0 ;; + --force|-f) rm -f $statusfile ;; + --version|-V) echo ${version}; exit 0 ;; + *) + echo Unrecognized option: $i 1>&2 + echo ${usage} + exit 1 + ;; + esac +done + +function checknumber() +{ + local rvalue=$2 + local number + a=$(echo $1 | cut -d '.' 
-f 1) + b=$(echo $1 | cut -d '.' -f 2) + c=$(echo $1 | cut -d '.' -f 3) + if [ "$a" -gt "255" ]; then echo "Error: $a is out of range[0,255]."; exit 1; fi + if [ "$b" -gt "255" ]; then echo "Error: $b is out of range[0,255]."; exit 1; fi + if [ "$c" -gt "255" ]; then echo "Error: $c is out of range[0,255]."; exit 1; fi + (( number = $c + ($b << 8) + ($a << 16) )) + eval $rvalue="'$number'" +} +checknumber $version refversion + +if [ -e $statusfile ] +then + checknumber $(cat $statusfile) newversion + if [ "$refversion" -le "$newversion" ] + then + echo "latest version of $0 appears to have been successfully run, use -f option to force script to run." + exit 0 + fi +fi + +sane=true + +distributor_id_sane="^Ubuntu$" +release_sane="^14.04$" +codename_sane="^trusty$" +arch_sane="^amd64$" + +case "${check_sanity}" in + true) + if [ ! -x /usr/bin/lsb_release ] ; then + echo 'WARNING: /usr/bin/lsb_release not available, cannot test sanity of this system.' 1>&2 + sane=false + else + distributor_id=`/usr/bin/lsb_release -s -i` + release=`/usr/bin/lsb_release -s -r` + codename=`/usr/bin/lsb_release -s -c` + + if ! echo "${distributor_id}" | egrep -q "${distributor_id_sane}"; then + echo "WARNING: Distributor ID reported by lsb_release '${distributor_id}' not in '${distributor_id_sane}'" 1>&2 + sane=false + fi + + if ! echo "${release}" | egrep -q "${release_sane}"; then + echo "WARNING: Release reported by lsb_release '${release}' not in '${release_sane}'" 1>&2 + sane=false + fi + + if ! echo "${codename}" | egrep -q "${codename_sane}"; then + echo "WARNING: Codename reported by lsb_release '${codename}' not in '${codename_sane}'" 1>&2 + sane=false + fi + fi + + if [ ! -x /usr/bin/dpkg ] ; then + echo 'WARNING: /usr/bin/dpkg not available, cannot test architecture of this system.' 1>&2 + sane=false + else + arch=`/usr/bin/dpkg --print-architecture` + if ! 
echo "${arch}" | egrep -q "${arch_sane}"; then + echo "WARNING: Architecture reported by dpkg --print-architecture '${arch}' not in '${arch_sane}'" 1>&2 + sane=false + fi + fi + + case "${sane}" in + true) ;; + false) + echo 'WARNING: This system configuration is untested. Let us know if it works.' 1>&2 + ;; + esac + ;; + + false) ;; +esac + +apt-get update + +# These are essential to pass OE sanity test +# locales, because utf8 is needed with newer bitbake which uses python3 +essential="\ + build-essential \ + chrpath \ + cpio \ + diffstat \ + gawk \ + git \ + iputils-ping \ + locales \ + lsb-release \ + python \ + python2.7 \ + python3 \ + texinfo \ + wget \ +" + +# bzip2, gzip, tar, zip are used by our scripts/build.sh +archivers="\ + bzip2 \ + gzip \ + tar \ + zip \ +" + +# gcc-multilib is needed to build 32bit version of pseudo +# g++-multilib is needed to build and run 32bit mksnapshot of V8 (in chromium53) +extras="\ + gcc-multilib \ + g++-multilib \ + time \ +" + +apt-get install --yes \ + ${essential} \ + ${extras} \ + ${archivers} \ + +locale-gen en_US.utf8 + +echo $version > $statusfile diff --git a/weboslayers.py b/weboslayers.py new file mode 100644 index 00000000..0c1f56a0 --- /dev/null +++ b/weboslayers.py @@ -0,0 +1,81 @@ +# Copyright (c) 2008-2018 LG Electronics, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This implementation introduces next generation build environment for +# Open webOS. 
The change introduces a mechanism to add additional layers to the +# base ones: oe-core, meta-oe, and meta-webos, and to specify the commits to be +# used for each. mcf now expects the layers to be defined in this file +# (weboslayers.py in the same directory as mcf) as a list of Python data tuples: +# +# webos_layers = [ +# ('layer-name', priority, 'URL', 'submission', 'working-dir'), +# ... +# ] +# +# where: +# +# layer-name = Unique identifier; it represents the layer directory containing +# conf/layer.conf. +# +# priority = Integer layer priority as defined by OpenEmbedded. It also +# specifies the order in which layers are searched for files. +# Larger values have higher priority. A value of -1 indicates +# that the entry is not a layer; for example, bitbake. +# +# URL = The Git repository address for the layer from which to clone. +# A value of '' skips the cloning. +# +# submission = The information used by Git to checkout and identify the precise +# content. Submission values could be "branch=" and +# "commit=" or "tag=". Omitted branch information means +# only that "master" will be used as the name of the local +# branch. Omitted commit or tag means origin/HEAD will be checked +# out (which might NOT be origin/master, although +# it usually is). +# +# working-dir = Alternative directory for the layer. 
+# +# The name of the distribution is also defined in this file +# along with a list of the valid MACHINE-s +# + +Distribution = "webos" + +# Supported MACHINE-s +Machines = ['raspberrypi3'] + +# github.com/openembedded repositories are read-only mirrors of the authoritative +# repositories on git.openembedded.org +webos_layers = [ +('bitbake', -1, 'git://github.com/openembedded/bitbake.git', 'branch=1.32,commit=5f5e13b', ''), + +('meta', 5, 'git://github.com/openembedded/openembedded-core.git', 'branch=morty,commit=1718f0a', 'oe-core'), + +('meta-oe', 10, 'git://github.com/openembedded/meta-openembedded.git', 'branch=morty,commit=b40116c', 'meta-oe'), +('meta-multimedia', 11, 'git://github.com/openembedded/meta-openembedded.git', '', 'meta-oe'), +('meta-networking', 12, 'git://github.com/openembedded/meta-openembedded.git', '', 'meta-oe'), +('meta-python', 13, 'git://github.com/openembedded/meta-openembedded.git', '', 'meta-oe'), +('meta-filesystems', 14, 'git://github.com/openembedded/meta-openembedded.git', '', 'meta-oe'), + +('meta-qt5', 20, 'git://github.com/meta-qt5/meta-qt5.git', 'branch=krogoth,commit=f8584d7', ''), + +('meta-webos-backports-2.3', 30, 'git://github.com/webosose/meta-webosose.git', '', ''), +('meta-webos-backports-2.4', 31, 'git://github.com/webosose/meta-webosose.git', '', ''), +('meta-webos-backports-2.5', 32, 'git://github.com/webosose/meta-webosose.git', '', ''), + +('meta-webos', 40, 'git://github.com/webosose/meta-webosose.git', 'branch=master,commit=7c8e550', ''), + +('meta-raspberrypi', 50, 'git://git.yoctoproject.org/meta-raspberrypi', 'branch=morty,commit=2a19226', ''), +('meta-webos-raspberrypi', 51, 'git://github.com/webosose/meta-webosose.git', '', ''), +]