Mirror of https://gitlab.com/prpl-foundation/prplos/prplos.git (synced 2025-12-20 00:56:07 +08:00)
ci: copy over files from prplOS v1.5 release

In order to have QA in place.

Signed-off-by: Petr Štetiar <ynezz@true.cz>

Committed by Petr Štetiar (parent 0cdbbd8868, commit f78b369cf0)
.gitlab-ci.yml (new file, 112 lines)
@@ -0,0 +1,112 @@
variables:
  CI_DESIGNATED_BRANCH: prplos

include:
  - local: .gitlab/build.yml
  - local: .gitlab/cdrouter.yml
  - local: .gitlab/coverity.yml
  - local: .gitlab/docker.yml
  - local: .gitlab/docker/builder/gitlab.yml
  - local: .gitlab/docker/testbed/gitlab.yml
  - local: .gitlab/docker/sdk/gitlab.yml
  - local: .gitlab/testbed.yml
  - local: .gitlab/testbed/nec-wx3000hp.yml
  - local: .gitlab/testbed/glinet-b1300.yml
  - local: .gitlab/testbed/turris-omnia.yml

stages:
  - docker
  - docker test
  - docker deploy
  - build
  - docker SDK
  - docker SDK test
  - docker SDK deploy
  - run
  - cdrouter
  - coverity

build test intel_mips prpl:
  extends: .build test config

build test ipq40xx prpl:
  extends: .build test config

build test mvebu prpl:
  extends: .build test config

coverity scan ipq40xx prpl:
  extends: .coverity scan config
  variables:
    CI_COVERITY_COMPILER_TEMPLATE_LIST: >
      arm-openwrt-linux-gcc
      arm-openwrt-linux-muslgnueabi-gcc

run test NEC WX3000HP with system on NAND:
  needs:
    - build test intel_mips prpl
  extends: .nec-wx3000hp testbed

run test Gl.iNet B1300 with system on initramfs:
  needs:
    - build test ipq40xx prpl
  extends: .glinet-b1300 testbed

run test Turris Omnia with system on initramfs:
  needs:
    - build test mvebu prpl
  extends: .turris-omnia testbed

CDRouter CDRouter-Top-100 package on NEC WX3000HP with system on NAND:
  needs:
    - build test intel_mips prpl
  extends:
    - .nec-wx3000hp testbed
    - .cdrouter
  variables:
    LABGRID_TARGET: "$DUT_BOARD-firstboot"

CDRouter CDRouter-Top-100 package on Gl.iNet B1300 with system on initramfs:
  needs:
    - build test ipq40xx prpl
  extends:
    - .glinet-b1300 testbed
    - .cdrouter

CDRouter CDRouter-Top-100 package on Turris Omnia with system on initramfs:
  needs:
    - build test mvebu prpl
  extends:
    - .turris-omnia testbed
    - .cdrouter

CDRouter CDRouter-TR-069 package on NEC WX3000HP with system on NAND:
  allow_failure: true
  needs:
    - build test intel_mips prpl
  extends:
    - .nec-wx3000hp testbed
    - .cdrouter

CDRouter CDRouter-TR-069 package on Gl.iNet B1300 with system on NOR:
  allow_failure: true
  needs:
    - build test ipq40xx prpl
  extends:
    - .glinet-b1300 testbed
    - .cdrouter
  variables:
    LABGRID_TARGET: glinet-b1300-nor
    TFTP_IMAGE_FILENAME: openwrt-ipq40xx-generic-glinet_gl-b1300-squashfs-sysupgrade.bin

CDRouter CDRouter-TR-069 package on Turris Omnia with system on eMMC:
  allow_failure: true
  needs:
    - build test mvebu prpl
  extends:
    - .turris-omnia testbed
    - .cdrouter
  variables:
    LABGRID_TARGET: turris-omnia-emmc
    TFTP_IMAGE_FILENAME: openwrt-mvebu-cortexa9-cznic_turris-omnia-sysupgrade.img.gz
    TFTP_IMAGE_UNPACK_COMMAND: "gunzip --force $TESTBED_TFTP_PATH/$TFTP_IMAGE_FILENAME || true"
.gitlab/README.md (new file, 108 lines)
@@ -0,0 +1,108 @@
# Content

This directory contains files used on the GitLab CI.

## docker

Contains definitions for Docker images.

## testbed

Contains definitions for testbed related tests.

## scripts

Contains scripts used on GitLab CI.

## tests

Contains tests used on GitLab CI.

## build.yml

Contains definition of the GitLab CI build test job templates used during the `build` stage. Those templates can then be extended by other GitLab CI jobs in order to keep things DRY.

### .build test config

Allows build testing of specific configurations defined in the profiles directory, using `scripts/gen_config.py` under the hood.

Example usage:

```yaml
include:
  - local: .gitlab/build.yml

stages:
  - build

build test netgear-rax40 prpl webui:
  extends: .build test config
```

This is going to build test prplOS with the `netgear-rax40`, `prpl` and `webui` profiles.

Description of CI variables:

* CI_BUILD_CONFIG_EXTRA - list of additional config options which should be enabled (prefixed with `+`) or disabled (prefixed with `-`), for example `+BUILD_LOG -SDK` would result in `CONFIG_BUILD_LOG=y` and `CONFIG_SDK=n` config symbols in the `.config` file, as shown in the sketch below.
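A minimal sketch of passing such extra options when extending the template; the job name and the chosen options below are purely illustrative:

```yaml
build test netgear-rax40 prpl webui:
  extends: .build test config
  variables:
    # illustrative: enable verbose build logs, skip building the SDK tarball
    CI_BUILD_CONFIG_EXTRA: >
      +BUILD_LOG -SDK
```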
## docker.yml

Provides support for building, tagging and pushing the Docker images to the image registry.

For example, let's build image `foo`.

Prerequisites:

* Create a directory for the new Docker image: `mkdir -p .gitlab/docker/foo`
* Add the Docker image description: `$EDITOR .gitlab/docker/foo/Dockerfile`

Then just put the following into `.gitlab/docker/foo/gitlab.yml`:

```yaml
build Docker image foo:
  extends: .build Docker image
```

## testbed.yml

Provides bits needed for runtime testing on a real device using the [labgrid](https://labgrid.readthedocs.io/en/latest/) Python testing framework.

Supported devices:

* Netgear RAX40

## sdk.yml

Provides support for the [OpenWrt SDK](https://openwrt.org/docs/guide-developer/using_the_sdk) utilizing the [Docker SDK container](https://gitlab.com/prpl-foundation/prplOS/prplos/-/tree/prplos/.gitlab/docker/sdk) under the hood.

### .build feed with SDK

Allows compile testing of packages using the SDK.

Example usage, build testing of the [safec3](https://gitlab.com/prpl-foundation/intel/feed_opensource_apps/-/tree/ugw-8.4.1/safec3) package from Intel's `feed_opensource_apps` feed:

```yaml
include:
  - remote: https://gitlab.com/prpl-foundation/prplOS/prplos/-/raw/prplos/.gitlab/sdk.yml

build:
  extends: .build feed with SDK
  variables:
    CI_SDK_INSTALL_FEEDS: feed_target_mips
    CI_SDK_BUILD_PACKAGES: safec3
```

Description of CI variables:

* CI_SDK_INSTALL_FEEDS - list of feeds which should be installed prior to building packages.
* CI_SDK_BUILD_PACKAGES - list of packages which should be compile tested.
* CI_SDK_BUILD_PARALLEL - max number of threads to use for make (defaults to `nproc` if unset).

### .generate SDK package build jobs

Generates `sdk-package-jobs.yml` with separate build jobs for every package so the builds of those packages can run in parallel. It is meant to be used together with the child pipeline `.execute SDK package build jobs`.

### .execute SDK package build jobs

Executes the jobs from `sdk-package-jobs.yml` produced by the `.generate SDK package build jobs` job; a wiring sketch follows.
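A minimal wiring sketch, assuming the standard GitLab parent/child pipeline mechanism; since `.gitlab/sdk.yml` itself is not part of this commit, the exact job layout may differ and the job names and package value below are illustrative:

```yaml
generate SDK package build jobs:
  extends: .generate SDK package build jobs
  variables:
    CI_SDK_BUILD_PACKAGES: safec3

execute SDK package build jobs:
  extends: .execute SDK package build jobs
  # child pipeline built from the generated jobs file (assumption)
  trigger:
    include:
      - artifact: sdk-package-jobs.yml
        job: generate SDK package build jobs
    strategy: depend
```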
.gitlab/build.yml (new file, 80 lines)
@@ -0,0 +1,80 @@
.build test config:
  stage: build
  image: "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/builder:latest"
  tags:
    - firmware-builder
  variables:
    CI_BUILD_CONFIG: >
      +DEVEL +BUILD_LOG +SDK
  rules:
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
      when: never
    - if: '$CI_COMMIT_BRANCH == $CI_DESIGNATED_BRANCH'
    - when: manual
  before_script:
    - touch .build_failed

  script:
    - mkdir logs
    - set -o pipefail

    - export CI_BUILD_PROFILE="$(echo $CI_JOB_NAME | sed 's/build test \(.*\)/\1/')"
    - scripts/gen_config.py $CI_BUILD_PROFILE 2>&1 | tee logs/build.log
    - >
      for option in $CI_BUILD_CONFIG $CI_BUILD_CONFIG_EXTRA; do
        echo "$option" | sed -E "s/^\+(.*)$/CONFIG_\1=y/;s/^\-(.*)$/CONFIG_\1=n/" >> .config
      done
    - cat .config
    - make defconfig | tee --append logs/build.log

    - make -j $(nproc) tools/tar/compile 2>&1 | tee --append logs/build.log
    - make -j $(nproc) download check FIXUP=1 2>&1 | tee --append logs/build.log

    - >
      git diff-index --exit-code HEAD || {
        ret=$?
        echo "Package integrity issues, please check packages-hash-issues.patch from artifacts"
        git diff > packages-hash-issues.patch
        exit $ret
      }

    - >
      topdir=$(pwd);
      for feed in $(find feeds -name .git); do
        pushd $(dirname $feed) > /dev/null; git diff-index --exit-code HEAD || {
          ret=$?
          echo "Feed $(dirname $feed) packages integrity issues, please check feed-packages-hash-issues.patch from artifacts"
          git diff > $topdir/feed-packages-hash-issues.patch
          exit $ret
        }
        popd > /dev/null
      done

    - make -j $(nproc) 2>&1 | tee --append logs/build.log
    - rm .build_failed

  after_script:
    - >
      if test -f .build_failed && grep -qr 'make\[[[:digit:]]\].*Error [[:digit:]]$' logs; then
        printf "\n====== Showing Make errors found in the log files ======";
        for file in $(grep -lr 'make\[[[:digit:]]\].*Error [[:digit:]]$' logs); do
          printf "\n====== Make errors from $CI_JOB_URL/artifacts/file/$file ======\n" ;
          grep -r -C5 'make\[[[:digit:]]\].*Error [[:digit:]]$' $file ;
        done
      fi
    - >
      [ "$CI_COMMIT_BRANCH" = "$CI_DESIGNATED_BRANCH" ] && {
        test -f .build_failed && .gitlab/scripts/prpl-jira.py build_failure || true
      } || true

  artifacts:
    expire_in: 1 month
    when: always
    paths:
      - bin
      - logs
      - ./*packages-hash-issues.patch
  cache:
    key: openwrt-downloads
    paths:
      - dl/
.gitlab/cdrouter.yml (new file, 130 lines)
@@ -0,0 +1,130 @@
.cdrouter:
  stage: cdrouter
  image: "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/testbed:latest"
  rules:
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
      when: never
    - if: '$CI_COMMIT_BRANCH == $CI_DESIGNATED_BRANCH'
    - when: manual
  variables:
    TEST_CONFIG: generic
    DUT_SLEEP_AFTER_BOOT: 20
    CDROUTER_LAN_INTERFACE: eth1
    CDROUTER_WAN_INTERFACE: eth2
    CDROUTER_ETH0_MAC_ADDRESS: 00:03:2d:49:2e:d8

  before_script:
    - sudo etherwake -i $TESTBED_MNG_INTERFACE $CDROUTER_ETH0_MAC_ADDRESS 2> /dev/null
    - sudo ip link set $TESTBED_LAN_INTERFACE up 2> /dev/null
    - sudo ip link set $TESTBED_WAN_INTERFACE up 2> /dev/null
    - sleep 10

    - eval $(ssh-agent -s)
    - echo "$TESTBED_SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
    - mkdir -p ~/.ssh; chmod 700 ~/.ssh
    - ssh-keyscan $TESTBED_UART_RELAY_HOST > ~/.ssh/known_hosts 2> /dev/null
    - chmod 644 ~/.ssh/known_hosts

    - >
      if [ -n "$TFTP_IMAGE_DOWNLOAD_URL" ]; then
        echo "Downloading firmware image from $TFTP_IMAGE_DOWNLOAD_URL"
        curl "$TFTP_IMAGE_DOWNLOAD_URL" > "$TESTBED_TFTP_PATH/$TFTP_IMAGE_FILENAME"
      else
        echo "Using firmware image $TFTP_IMAGE_PATH/$TFTP_IMAGE_FILENAME"
        cp "$TFTP_IMAGE_PATH/$TFTP_IMAGE_FILENAME" "$TESTBED_TFTP_PATH"
      fi
    - >
      if [ -n "$TFTP_IMAGE_UNPACK_COMMAND" ]; then
        echo "Running $TFTP_IMAGE_UNPACK_COMMAND"
        eval "$TFTP_IMAGE_UNPACK_COMMAND"
      fi
    - .gitlab/scripts/testbed-device.py --target $LABGRID_TARGET boot_into shell
    - sudo ip link set $TESTBED_WAN_INTERFACE down 2> /dev/null
    - >
      .gitlab/scripts/testbed-device.py
      --target $LABGRID_TARGET check_network
      --network lan
      --remote-host $TARGET_LAN_TEST_HOST

    - >
      retry_count=3;
      while [ $retry_count -gt 0 ]; do
        echo "Waiting for SSH availability on $TARGET_LAN_IP"
        ssh-keyscan "$TARGET_LAN_IP" 2>&1 | grep -q "$TARGET_LAN_IP" && break
        retry_count="$(( retry_count - 1 ))"
        sleep 5
      done

    - ssh-keyscan $TARGET_LAN_IP >> ~/.ssh/known_hosts 2> /dev/null
    - ssh root@$TARGET_LAN_IP "cat /var/log/messages" > syslog-$LABGRID_TARGET.txt 2> /dev/null || true
    - ssh root@$TARGET_LAN_IP "ubus call system board" | tee system-$LABGRID_TARGET.json

    - scp .gitlab/mockups/tr181-mockups.tar.gz root@${TARGET_LAN_IP}:/tmp/
    - ssh root@$TARGET_LAN_IP "tar xzf /tmp/tr181-mockups.tar.gz -C / && /etc/init.d/tr181-mockups start"

    - .gitlab/scripts/testbed-cdrouter.py package_stop
    - .gitlab/scripts/testbed-cdrouter.py wait_for_netif $CDROUTER_LAN_INTERFACE
    - .gitlab/scripts/testbed-cdrouter.py wait_for_netif $CDROUTER_WAN_INTERFACE

    - sudo ip link set $TESTBED_LAN_INTERFACE down 2> /dev/null

  script:
    - sleep $DUT_SLEEP_AFTER_BOOT

    - export TEST_PACKAGE="$(echo $CI_JOB_NAME | sed 's/CDRouter \(.*\) package on .*/\1/')"
    - .gitlab/scripts/testbed-cdrouter.py config_import $TEST_CONFIG
    - .gitlab/scripts/testbed-cdrouter.py package_import $TEST_PACKAGE.gz
    - >
      .gitlab/scripts/testbed-cdrouter.py
      package_run $TEST_PACKAGE
      --device generic
      --configuration $TEST_CONFIG
      --system-info system-$LABGRID_TARGET.json

  after_script:
    - >
      test -f *-logdir.tgz &&
      mkdir -p cdrouter-results &&
      tar xf *-logdir.tgz --strip-components=1 --directory=cdrouter-results &&
      cat cdrouter-results/final.txt &&
      .gitlab/scripts/cdrouter-pretty-failures.awk cdrouter-results/*.txt

    - eval $(ssh-agent -s)
    - echo "$TESTBED_SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
    - mkdir -p ~/.ssh; chmod 700 ~/.ssh
    - ssh-keyscan $TESTBED_UART_RELAY_HOST > ~/.ssh/known_hosts 2> /dev/null
    - chmod 644 ~/.ssh/known_hosts

    - sudo ip link set $TESTBED_LAN_INTERFACE up 2> /dev/null
    - sudo ip link set $TESTBED_WAN_INTERFACE up 2> /dev/null
    - sleep 5

    - ssh-keyscan $TARGET_LAN_IP >> ~/.ssh/known_hosts 2> /dev/null
    - >
      ssh root@$TARGET_LAN_IP exit && {
        ssh root@$TARGET_LAN_IP ps > processes-$LABGRID_TARGET.txt
        ssh root@$TARGET_LAN_IP dmesg > dmesg-$LABGRID_TARGET.txt
        ssh root@$TARGET_LAN_IP "cat /var/log/messages" > syslog-$LABGRID_TARGET.txt
        ssh root@$TARGET_LAN_IP opkg list > opkg_list-$LABGRID_TARGET.txt
        scp -r root@${TARGET_LAN_IP}:/etc etc > /dev/null
        scp -r root@${TARGET_LAN_IP}:/tmp/beerocks/logs prplmesh_beerocks_logs > /dev/null
      } || true

    - .gitlab/scripts/testbed-device.py --target $LABGRID_TARGET power off
    - mv console_$LABGRID_TARGET console_$LABGRID_TARGET.txt || true

    - .gitlab/scripts/testbed-cdrouter.py package_stop

  artifacts:
    expire_in: 1 month
    when: always
    paths:
      - cdrouter-results
      - etc
      - prplmesh_beerocks_logs
      - processes-$LABGRID_TARGET.txt
      - dmesg-$LABGRID_TARGET.txt
      - syslog-$LABGRID_TARGET.txt
      - system-$LABGRID_TARGET.json
      - console_$LABGRID_TARGET.txt
      - opkg_list-$LABGRID_TARGET.txt
.gitlab/coverity.yml (new file, 190 lines)
@@ -0,0 +1,190 @@
.coverity scan config:
  stage: coverity
  image: "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/builder:latest"
  tags:
    - firmware-builder
  dependencies: []
  variables:
    CI_BUILD_CONFIG: >
      +DEVEL +BUILD_LOG +ALL +ALL_KMODS +ALL_NONSHARED
    CI_COVERITY_PROJECT_NAME: prplOS
    CI_COVERITY_FORCE_COMPILE_PACKAGE_LIST: >
      curl
      libnl
      mbedtls
      openssl
    CI_COVERITY_OPENWRT_PACKAGE_LIST: >
      cgi-io
      dropbear
      dnsmasq
      firewall
      fstools
      iwinfo
      jsonfilter
      libnl-tiny
      libubox
      netifd
      odhcp6c
      odhcpd
      opkg
      procd
      rpcd
      swconfig
      ubox
      ubus
      ucert
      uci
      uclient
      uhttpd
      umdns
      usign
      ustream-ssl
    CI_COVERITY_PRPLOS_PACKAGE_LIST: >
      acl-manager
      amxb-inspect
      amx-cli
      amx-fcgi
      amxo-cg
      amxrt
      cthulhu
      cthulhu-lxc
      deviceinfo-manager
      dhcpv4-manager
      dhcpv6s-manager
      ethernet-manager
      gmap-mibs-common
      gmap-mod-ethernet-dev
      gmap-mod-name-selector
      gmap-mod-self
      gmap-server
      ip-manager
      libamxa
      libamxb
      libamxc
      libamxd
      libamxj
      libamxm
      libamxo
      libamxp
      libamxs
      libamxt
      libdhcpoptions
      libfwinterface
      libfwrules
      libgmap-client
      libnetmodel
      libsahtrace
      libtrace
      moca-manager
      mod-amxb-ubus
      mod-ba-cli
      mod-dm-cli
      mod-dmext
      mod-dmproxy
      mod-dmstats
      mod-netmodel
      mod-pcm-svc
      mod-sahtrace
      mod-vlan-ioctl
      mod-vlan-uci
      netdev-plugin
      netmodel
      netmodel-ip
      pcm-manager
      prplmesh
      rlyeh
      routing-manager
      time-manager
      timingila
      timingila-cthulhu
      timingila-rlyeh
      tr181-bridging
      tr181-device
      tr181-dhcpv4client
      tr181-dhcpv6client
      tr181-firewall
      tr181-qos
      tr181-routeradvertisement
      tr181-usermanagement
      umdns
      uriparser
      wan-autosensing
      wan-manager

  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"

  script:
    - mkdir logs
    - set -o pipefail

    - export CI_BUILD_PROFILE="$(echo $CI_JOB_NAME | sed 's/coverity scan \(.*\)/\1/')"
    - scripts/gen_config.py $CI_BUILD_PROFILE 2>&1 | tee logs/build.log
    - >
      for option in $CI_BUILD_CONFIG $CI_BUILD_CONFIG_EXTRA; do
        echo "$option" | sed -E "s/^\+(.*)$/CONFIG_\1=y/;s/^\-(.*)$/CONFIG_\1=n/" >> .config
      done
    - cat .config
    - make defconfig | tee --append logs/build.log

    - wget -q https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_PROJECT_TOKEN&project=$CI_COVERITY_PROJECT_NAME" -O coverity.tar.gz
    - wget -q https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_PROJECT_TOKEN&project=$CI_COVERITY_PROJECT_NAME&md5=1" -O coverity.tar.gz.md5
    - echo ' coverity.tar.gz' >> coverity.tar.gz.md5
    - md5sum -c coverity.tar.gz.md5

    - mkdir cov-analysis-linux64
    - tar xzf coverity.tar.gz --strip 1 -C cov-analysis-linux64
    - export PATH=$(pwd)/cov-analysis-linux64/bin:$PATH
    - >
      for template in $CI_COVERITY_COMPILER_TEMPLATE_LIST; do
        cov-configure --template --comptype gcc --compiler $template
      done

    - make -j $(nproc) tools/tar/compile || make -j1 V=s tools/tar/compile 2>&1 | tee --append logs/build.log
    - make -j $(nproc) download check FIXUP=1 || make -j1 V=s download check FIXUP=1 2>&1 | tee --append logs/build.log
    - make -j $(nproc) || make -j1 V=s 2>&1 | tee --append logs/build.log

    - printf -v clean_packages "package/%s/clean " ${CI_COVERITY_OPENWRT_PACKAGE_LIST[@]} ${CI_COVERITY_PRPLOS_PACKAGE_LIST[@]}
    - make $clean_packages

    - printf -v force_compile_packages "package/%s/compile " ${CI_COVERITY_FORCE_COMPILE_PACKAGE_LIST[@]}
    - make -j $(nproc) $force_compile_packages

    - echo "$COVERITY_PROJECT_TOKEN" > ./coverity-api-token
    - >
      unset
      CI_JOB_JWT
      CI_JOB_TOKEN
      CI_BUILD_TOKEN
      CI_RUNNER_SHORT_TOKEN
      CI_DEPENDENCY_PROXY_PASSWORD
      CI_REPOSITORY_URL
      CI_REGISTRY_PASSWORD
      TESTBED_SSH_PRIVATE_KEY
      COVERITY_PROJECT_TOKEN

    - printf -v compile_packages "package/%s/compile " ${CI_COVERITY_OPENWRT_PACKAGE_LIST[@]} ${CI_COVERITY_PRPLOS_PACKAGE_LIST[@]}
    - cov-build --dir cov-int make -j $(nproc) $compile_packages

    - tar czf cov-int.tar.gz ./cov-int
    - >
      curl
      --form token="$(cat ./coverity-api-token)"
      --form email=ynezz@true.cz
      --form file=@cov-int.tar.gz
      --form version="$CI_COMMIT_SHORT_SHA"
      --form description="$CI_COMMIT_REF_SLUG"
      "https://scan.coverity.com/builds?project=$CI_COVERITY_PROJECT_NAME"

  artifacts:
    expire_in: 1 month
    when: always
    paths:
      - bin
      - logs
      - cov-int.tar.gz

  cache:
    key: openwrt-downloads
    paths:
      - dl/
.gitlab/docker.yml (new file, 57 lines)
@@ -0,0 +1,57 @@
.docker in docker:
  tags:
    - gce
  image: docker:19.03.7
  services:
    - docker:19.03.7-dind
  rules:
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
      when: never
    - if: '$CI_COMMIT_BRANCH == $CI_DESIGNATED_BRANCH'
    - when: manual
      allow_failure: true
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_TLS_CERTDIR: "/certs"

.build Docker image:
  stage: docker
  extends: .docker in docker
  script:
    - export IMAGE_NAME="$(echo $CI_JOB_NAME | sed 's/build Docker image \(.*\)/\1/')"
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    - docker build -t "$IMAGE_NAME" $DOCKER_BUILD_EXTRA_ARGS $DOCKER_DOCKERFILE_PATH
    - docker tag "$IMAGE_NAME" "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG"
    - docker push "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG"

.test Docker SDK image:
  stage: docker SDK test
  extends: .docker in docker
  script:
    - export IMAGE_NAME="$(echo $CI_JOB_NAME | sed 's/test Docker image \(.*\)/\1/')"
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    - docker pull "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG"
    - >
      docker run "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG" sh -c "
        cd /home/builder &&
        make defconfig &&
        ./scripts/feeds update base &&
        ./scripts/feeds install busybox &&
        sync && sleep 1 &&
        make -j $(nproc) package/busybox/compile V=s &&
        sync && sleep 1 &&
        find ./bin/packages -name busybox*.ipk | grep ^./bin/packages/.*busybox
      "

.deploy Docker image:
  extends: .docker in docker
  rules:
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
      when: never
    - if: '$CI_COMMIT_BRANCH == $CI_DESIGNATED_BRANCH'
  script:
    - export IMAGE_NAME="$(echo $CI_JOB_NAME | sed 's/deploy Docker image \(.*\)/\1/')"
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    - docker pull "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG"
    - docker tag "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG" "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:latest"
    - docker push "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:latest"
.gitlab/docker/README.md (new file, 15 lines)
@@ -0,0 +1,15 @@
# Content

This directory contains bits for Docker images used on the GitLab CI.

## builder

Docker image used for build testing OpenWrt/prplOS.

## testbed

Docker image used for runtime testing.

## sdk

Docker image with the target SDK.
.gitlab/docker/builder/Dockerfile (new file, 51 lines)
@@ -0,0 +1,51 @@
FROM debian:buster-slim

ENV DEBIAN_FRONTEND=noninteractive

RUN \
    apt-get update && \
    apt-get install --yes --no-install-recommends \
        build-essential \
        curl \
        file \
        gawk \
        git-core \
        gosu \
        iputils-ping \
        iproute2 \
        libncurses5-dev \
        locales \
        nmap \
        openssh-client \
        python-distlib \
        python-yaml \
        python3 \
        python3-pip \
        python3-setuptools \
        pwgen \
        rsync \
        signify-openbsd \
        subversion \
        sudo \
        unzip \
        wget && \
    apt-get -y autoremove && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8

RUN pip3 install -U pip
RUN pip3 install \
    jira \
    pyyaml \
    pyjwt

RUN \
    echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers && \
    useradd -c "OpenWrt Builder" -m -d /home/builder -G sudo -s /bin/bash builder

USER builder
ENV HOME /home/builder
WORKDIR /home/builder/
.gitlab/docker/builder/gitlab.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
build Docker image builder:
  extends: .build Docker image
  variables:
    DOCKER_DOCKERFILE_PATH: .gitlab/docker/builder

test Docker image builder:
  stage: docker test
  extends: .docker in docker
  needs: ["build Docker image builder"]
  script:
    - export IMAGE_NAME="$(echo $CI_JOB_NAME | sed 's/test Docker image \(.*\)/\1/')"
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    - docker pull "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG"
    - >
      docker run --rm "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG" sh -c "
        git clone --depth 1 https://gitlab.com/prpl-foundation/prplOS/prplos &&
        cd prplos && scripts/gen_config.py tplinkC6 prpl &&
        make -j $(nproc) tools/flock/compile &&
        python3 -c 'import jira; import jwt'
      "

deploy Docker image builder:
  stage: docker deploy
  needs: ["test Docker image builder"]
  extends: .deploy Docker image
.gitlab/docker/sdk/Dockerfile (new file, 8 lines)
@@ -0,0 +1,8 @@
ARG DESIGNATED_BRANCH=prplos
FROM registry.gitlab.com/prpl-foundation/prplos/prplos/$DESIGNATED_BRANCH/builder:latest

COPY --chown=builder:builder . /home/builder/

WORKDIR /home/builder
RUN mkdir .ssh && ssh-keyscan gitlab.com > .ssh/known_hosts
RUN sed -i 's;\(http[s]\?\)://\(.*@\);\1://;g' feeds.conf.default
.gitlab/docker/sdk/gitlab.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
build Docker image sdk-intel_mips-xrx500:
  stage: docker SDK
  needs: ["build test intel_mips prpl"]
  variables:
    DOCKER_DOCKERFILE_PATH: .gitlab/docker/sdk
    DOCKER_BUILD_EXTRA_ARGS: --build-arg DESIGNATED_BRANCH=$CI_DESIGNATED_BRANCH
  extends: .build Docker image
  before_script:
    - tar xf bin/targets/intel_mips/xrx500/openwrt-sdk-intel_mips-xrx500_gcc-*_musl.Linux-x86_64.tar.xz --strip=1 -C .gitlab/docker/sdk

test Docker image sdk-intel_mips-xrx500:
  extends: .test Docker SDK image
  needs: ["build Docker image sdk-intel_mips-xrx500"]

deploy Docker image sdk-intel_mips-xrx500:
  stage: docker SDK deploy
  extends: .deploy Docker image
  needs: ["test Docker image sdk-intel_mips-xrx500"]

build Docker image sdk-ipq40xx-generic:
  stage: docker SDK
  needs: ["build test ipq40xx prpl"]
  variables:
    DOCKER_DOCKERFILE_PATH: .gitlab/docker/sdk
    DOCKER_BUILD_EXTRA_ARGS: --build-arg DESIGNATED_BRANCH=$CI_DESIGNATED_BRANCH
  extends: .build Docker image
  before_script:
    - tar xf bin/targets/ipq40xx/generic/openwrt-sdk-ipq40xx-generic_gcc*.Linux-x86_64.tar.xz --strip=1 -C .gitlab/docker/sdk

test Docker image sdk-ipq40xx-generic:
  extends: .test Docker SDK image
  needs: ["build Docker image sdk-ipq40xx-generic"]

deploy Docker image sdk-ipq40xx-generic:
  stage: docker SDK deploy
  extends: .deploy Docker image
  needs: ["test Docker image sdk-ipq40xx-generic"]

build Docker image sdk-mvebu-cortexa9:
  stage: docker SDK
  needs: ["build test mvebu prpl"]
  variables:
    DOCKER_DOCKERFILE_PATH: .gitlab/docker/sdk
    DOCKER_BUILD_EXTRA_ARGS: --build-arg DESIGNATED_BRANCH=$CI_DESIGNATED_BRANCH
  extends: .build Docker image
  before_script:
    - tar xf bin/targets/mvebu/cortexa9/openwrt-sdk-mvebu-cortexa9_gcc*Linux-x86_64.tar.xz --strip=1 -C .gitlab/docker/sdk

test Docker image sdk-mvebu-cortexa9:
  extends: .test Docker SDK image
  needs: ["build Docker image sdk-mvebu-cortexa9"]

deploy Docker image sdk-mvebu-cortexa9:
  stage: docker SDK deploy
  extends: .deploy Docker image
  needs: ["test Docker image sdk-mvebu-cortexa9"]
.gitlab/docker/testbed/Dockerfile (new file, 69 lines)
@@ -0,0 +1,69 @@
FROM debian:buster-slim

ENV DEBIAN_FRONTEND=noninteractive

RUN \
    apt-get update && \
    apt-get install --yes --no-install-recommends \
        curl \
        etherwake \
        git-core \
        iputils-ping \
        iproute2 \
        locales \
        netcat-openbsd \
        nmap \
        openssh-client \
        python3 \
        python3-pip \
        python3-setuptools \
        sshpass \
        sudo \
        tcpdump \
        xz-utils && \
    apt-get -y autoremove && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8

ENV LABGRID_COMMIT=ffa994eca22b
RUN git clone --branch wip/testbed https://gitlab.com/prpl-foundation/prplOS/labgrid
WORKDIR labgrid
RUN git reset --hard $LABGRID_COMMIT

RUN \
    pip3 install -U pip && \
    pip3 install -r requirements.txt && \
    python3 setup.py install && \
    rm -fr /labgrid

RUN pip3 install \
    cdrouter \
    cram \
    humanize \
    jira \
    'marshmallow<3.0.0' \
    pyjwt

ENV TMATE_VERSION=2.4.0
ENV TMATE_SHA256SUM=6e503a1a3b0f9117bce6ff7cc30cf61bdc79e9b32d074cf96deb0264e067a60d
ENV TMATE_DL_URL=https://github.com/tmate-io/tmate/releases/download

RUN \
    curl --location --silent \
        "$TMATE_DL_URL/$TMATE_VERSION/tmate-${TMATE_VERSION}-static-linux-amd64.tar.xz" > tmate.tar.xz && \
    sha256sum tmate.tar.xz | grep -q "$TMATE_SHA256SUM" && \
    tar xf tmate.tar.xz --strip-components=1 -C /usr/local/bin && \
    rm tmate.tar.xz

RUN \
    echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers && \
    useradd -c "OpenWrt Testbed" -m -d /home/testbed -G sudo -s /bin/bash testbed

USER testbed
ENV HOME /home/testbed
WORKDIR /home/testbed/

VOLUME [ "/home/testbed" ]
.gitlab/docker/testbed/gitlab.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
build Docker image testbed:
  extends: .build Docker image
  variables:
    DOCKER_DOCKERFILE_PATH: .gitlab/docker/testbed

test Docker image testbed:
  stage: docker test
  extends: .docker in docker
  needs: ["build Docker image testbed"]
  script:
    - export IMAGE_NAME="$(echo $CI_JOB_NAME | sed 's/test Docker image \(.*\)/\1/')"
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    - docker pull "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG"
    - >
      docker run --rm "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/$IMAGE_NAME:$CI_COMMIT_REF_SLUG" sh -c "
        python3 -c 'import labgrid; import yaml; import jira; import jwt; import cdrouter' && tmate -V
      "

deploy Docker image testbed:
  stage: docker deploy
  extends: .deploy Docker image
  needs: ["test Docker image testbed"]
.gitlab/mockups/tr181-mockups.tar.gz (new binary file, not shown)
.gitlab/scripts/README.md (new file, 17 lines)
@@ -0,0 +1,17 @@
# Content

This directory contains files used mainly during testing on the GitLab CI.

## requirements.txt

This `requirements.txt` file specifies which Python packages are required to run the scripts in this directory. It is usually used with the `pip install -r requirements.txt` command.

## prpl-jira.py

This helper Python script creates a new or updates an existing Jira ticket when something goes wrong during the GitLab CI testing process. It currently only supports reporting of build test failures; see the example invocation below.

**Needs** Python version **3.6+** due to the use of f-strings.
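The script is normally invoked from `after_script` in `.gitlab/build.yml`; a minimal dry-run sketch of such a call (the `--build-logs-dir` value below simply mirrors the script's default `logs` directory):

```yaml
after_script:
  # --dry-run only logs which Jira issue would be created or updated
  - .gitlab/scripts/prpl-jira.py --dry-run build_failure --build-logs-dir logs
```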
## testbed-device.py

This helper Python script allows managing the device under test (DUT) via the labgrid framework.
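For reference, this is how the CDRouter jobs in `.gitlab/cdrouter.yml` drive it; `$LABGRID_TARGET` and `$TARGET_LAN_TEST_HOST` come from the testbed job definitions:

```yaml
script:
  # boot the DUT into a shell, verify LAN connectivity, then power it off
  - .gitlab/scripts/testbed-device.py --target $LABGRID_TARGET boot_into shell
  - .gitlab/scripts/testbed-device.py --target $LABGRID_TARGET check_network --network lan --remote-host $TARGET_LAN_TEST_HOST
  - .gitlab/scripts/testbed-device.py --target $LABGRID_TARGET power off
```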
.gitlab/scripts/cdrouter-pretty-failures.awk (new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/usr/bin/awk -f

/Test cdrouter-[0-9]+:/ {
	name=$0
}

/SECTION\(cdrouter-[0-9]+\)/ {
	section=substr($0,index($0,$3))
}

/FAIL: Test .* \([0-9]+\) failed$/ {
	next;
}

/FAIL:/ {
	failure=substr($0,index($0,$3))
	print name"\n\x1b[33m "section"\n\x1b[1;31m "failure"\n\x1b[0m"
}
.gitlab/scripts/prpl-jira.py (new executable file, 454 lines)
@@ -0,0 +1,454 @@
#!/usr/bin/env python3

import re
import sys
import os
import glob
import json
import fnmatch
import logging
import argparse

from jira import JIRA
from dataclasses import dataclass


@dataclass
class BuildLogFailure:
    path: str
    step: str
    name: str
    tail_log: list


class OpenWrtSystemInfo:
    def __init__(self, filename):
        with open(filename, "r", errors="ignore") as read_file:
            self.data = json.load(read_file)

    @property
    def kernel(self):
        return self.data["kernel"]

    @property
    def board_name(self):
        n = self.data["board_name"]
        return n.replace(",", "-")

    @property
    def target(self):
        return self.data["release"]["target"]

    @property
    def distribution(self):
        return self.data["release"]["distribution"]

    @property
    def revision(self):
        return self.data["release"]["revision"]

    @property
    def version(self):
        return self.data["release"]["version"]

    def sanitize(self, s):
        return s.replace("+", "-").replace("/", "-").replace(" ", "_")

    def as_tags(self):
        return self.sanitize(
            "kernel_{},board_{},target_{},revision_{},version_{},distro_{}".format(
                self.kernel,
                self.board_name,
                self.target,
                self.revision,
                self.version,
                self.distribution,
            )
        )


class RunLogAnalyzer:
    def __init__(self, log_dir=os.getcwd()):
        self._log_dir = log_dir
        self._log_file_patterns = {
            "cram": "cram-result-*.txt",
            "console": "console_*.txt",
            "system_info": "system-*.json",
        }
        self._log_files = {}
        self.analyze_logs()

    def analyze_logs(self):
        for filename in os.listdir(self._log_dir):
            for kind, pattern in self._log_file_patterns.items():
                if fnmatch.fnmatch(filename, pattern):
                    self._log_files[kind] = filename

    def logfile_content(self, kind):
        path = self.logfile_path(kind)
        if not path:
            return

        with open(self.logfile_path(kind), errors="ignore") as f:
            return f.read()

    def logfile_path(self, kind):
        filename = self._log_files.get(kind)
        if not filename:
            return

        return os.path.join(self._log_dir, filename)

    def log_files(self):
        return self._log_files

    def logfile_tail_content(self, kind, count=10):
        path = self.logfile_path(kind)
        if not path:
            return

        bufsize = 8192
        fsize = os.stat(path).st_size

        n = 0
        with open(path, errors="ignore") as f:
            if bufsize > fsize:
                bufsize = fsize - 1

            data = []
            while True:
                n += 1
                f.seek(fsize - bufsize * n)
                data.extend(f.readlines())
                if len(data) >= count or f.tell() == 0:
                    return "".join(data[-count:])

    def system_info(self):
        path = self.logfile_path("system_info")
        if not path:
            return

        return OpenWrtSystemInfo(path)

    def failures_jira_string(self):
        r = ""
        cram_log = self.logfile_content("cram")
        console_log = self.logfile_tail_content("console")
        system_info = self.system_info()

        if system_info:
            r += "Device under test provided following system details:\n"
            r += f" * Board: {system_info.board_name}\n"
            r += f" * Target: {system_info.target}\n"
            r += f" * Version: {system_info.version}\n"
            r += f" * Revision: {system_info.revision}\n"
            r += f" * Kernel: {system_info.kernel}\n"

        if cram_log:
            r += "{code:title=Cram test results}\n"
            r += cram_log
            r += "{code}\n"

        if console_log:
            r += "{code:title=Console log (last 10 lines)}\n"
            r += console_log
            r += "{code}\n"

        return r


class BuildLogAnalyzer:
    # make[3]: *** [Makefile:154: /builds/swpal_6x-uci-1.0.0.1.tar.bz2] Error 128
    RE_MAKE_ERROR = re.compile(r"make\[\d+\]: \*\*\* (.*) Error")

    # Makefile:563: recipe for target 'lldpcli' failed
    RE_MAKE_RECIPE_FAILED = re.compile(
        r"^Makefile:\d+: recipe for target '(.*)' failed"
    )

    def __init__(self, log_dir):
        self.log_dir = log_dir
        self._failures = []
        self.analyze_logs()

    def get_tail_log(self, path):
        tail_log = []
        re_matchers = [self.RE_MAKE_ERROR, self.RE_MAKE_RECIPE_FAILED]

        def add_tail_log(line):
            tail_log_goal = 4
            tail_log.append(line)
            truncate = len(tail_log) - tail_log_goal
            if truncate > 0:
                return tail_log[truncate:]
            return tail_log

        with open(path, "r", errors="ignore") as fd:
            for line in fd:
                line = line.rstrip()
                if len(line) == 0:
                    continue

                tail_log = add_tail_log(line)

                for matcher in re_matchers:
                    if matcher.match(line):
                        return tail_log

    def process_log_file(self, path):
        path = os.path.normpath(path)
        filename = os.path.basename(path)
        dirname = os.path.dirname(path)
        package = dirname.split(os.sep)[-1]
        (step, _) = os.path.splitext(filename)

        if step == "check-compile":
            return

        tail_log = self.get_tail_log(path)
        if not tail_log:
            return

        self._failures.append(BuildLogFailure(path, step, package, tail_log))

    def analyze_logs(self):
        glob_pattern = os.path.join(self.log_dir, "**/**.txt")
        for item in glob.iglob(glob_pattern, recursive=True):
            if os.path.isfile(item):
                self.process_log_file(item)

    def failures(self):
        return self._failures

    def failures_jira_string(self):
        if not self._failures:
            return ""

        r = "\nSeems like there are build issues with following items:\n"
        for f in self._failures:
            r += f" * {f.name} ({f.step}):\n"
            for line in f.tail_log:
                r += f" ** {line}\n"
            r += "\n"

        return r


class JiraHelper:
    def __init__(self, args):
        self.args = args
        self.login()

    def login(self):
        cert_data = None
        args = self.args

        with open(args.private_key, "r", errors="ignore") as cert_file:
            cert_data = cert_file.read()

        oauth_dict = {
            "access_token": args.access_token,
            "access_token_secret": args.access_token_secret,
            "consumer_key": args.consumer_key,
            "key_cert": cert_data,
        }

        self.jira = JIRA({"server": args.instance_url}, oauth=oauth_dict)

    def create_or_update_issue(self, failure_type, failure_details):
        args = self.args
        job = os.getenv("CI_JOB_NAME", "job")
        project = os.getenv("CI_PROJECT_NAME", "project")
        project_url = os.getenv("CI_PROJECT_URL", "https://project_url")
        branch = os.getenv("CI_COMMIT_BRANCH", "branch")
        job_url = os.getenv("CI_JOB_URL", "https://job_url")
        commit = os.getenv("CI_COMMIT_SHORT_SHA", "commit_sha")
        commit_message = os.getenv("CI_COMMIT_MESSAGE", "foo bar baz")

        summary = f"CI {failure_type} failure in {project}/{branch} during {job}"

        description = (
            f"Just noticed {failure_type} failure during execution of "
            f"[{job}|{job_url}] CI job in _{project}/{branch}_ which is now at "
            f"[{commit}|{project_url}/-/commit/{commit}] commit:"
            "{noformat}"
            f"{commit_message}"
            "{noformat}"
            f"{failure_details}"
        )

        if args.dry_run:
            logging.info(description)

        jql = f"""
        project={args.project} AND resolution = Unresolved AND summary ~ '{summary}'
        """
        existing_issue = self.jira.search_issues(jql, maxResults=1)
        if existing_issue:
            existing_issue = self.jira.issue(existing_issue[0].key)
            logging.info(
                f"Updating ({existing_issue.key}) {existing_issue.fields.summary}"
            )

            if not args.dry_run:
                self.jira.add_comment(existing_issue, description)

            return existing_issue

        if args.dry_run:
            logging.info(f"Would create new issue: '{self.args.project}/{summary}'")
            return

        new_issue = self.jira.create_issue(
            project=self.args.project,
            summary=summary,
            description=description,
            issuetype={"name": "Bug"},
            fixVersions=[{"name": "1.1"}],
            components=[{"name": "CI"}],
        )

        logging.info(f"Created ({new_issue.key}) {new_issue.fields.summary}")
        return new_issue

    def build_failure(self):
        dry_run = self.args.dry_run
        commit = os.getenv("CI_COMMIT_SHORT_SHA", "commit_sha")
        log_analyzer = BuildLogAnalyzer(self.args.build_logs_dir)
        failures = log_analyzer.failures()

        issue = self.create_or_update_issue(
            "build", log_analyzer.failures_jira_string()
        )

        if not failures:
            return

        if not dry_run and not issue:
            return

        for failure in failures:
            filename = f"{failure.name}_{failure.step}_{commit}.log"

            if dry_run:
                logging.info(f"Would add attachment {filename} from {failure.path})")
                continue

            logging.info(f"Adding attachment {filename} from {failure.path})")
            self.jira.add_attachment(
                issue=issue, attachment=failure.path, filename=filename
            )

    def run_failure(self):
        dry_run = self.args.dry_run
        commit = os.getenv("CI_COMMIT_SHORT_SHA", "commit_sha")
        log_analyzer = RunLogAnalyzer()
        log_files = log_analyzer.log_files()

        issue = self.create_or_update_issue("run", log_analyzer.failures_jira_string())

        if not log_files:
            return

        if not dry_run and not issue:
            return

        for kind, path in log_files.items():
            filename = f"{kind}_{commit}.log"

            if dry_run:
                logging.info(f"Would add attachment {filename} from {path})")
                continue

            logging.info(f"Adding attachment {filename} from {path})")
            self.jira.add_attachment(issue=issue, attachment=path, filename=filename)


def main():
    logging.basicConfig(
        level=logging.INFO, format="%(levelname)7s: %(message)s", stream=sys.stderr
    )

    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", action="store_true", help="enable debug mode")
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Do not alter Jira content, just printout what would be done",
    )

    parser.add_argument(
        "--access-token",
        type=str,
        default=os.environ.get("JIRA_ACCESS_TOKEN"),
        help="Jira access token (default: %(default)s)",
    )

    parser.add_argument(
        "--access-token-secret",
        type=str,
        default=os.environ.get("JIRA_ACCESS_TOKEN_SECRET"),
        help="Jira access token secret",
    )

    parser.add_argument(
        "--consumer-key",
        type=str,
        default=os.environ.get("JIRA_CONSUMER_KEY"),
        help="Jira consumer key",
    )

    parser.add_argument(
        "--instance-url",
        type=str,
        default=os.environ.get("JIRA_INSTANCE_URL", "https://jira.prplfoundation.org"),
        help="Jira instance URL (default: %(default)s)",
    )

    parser.add_argument(
        "--private-key",
        type=str,
        default=os.environ.get("JIRA_PRIVATE_KEY"),
        help="Jira private key",
    )

    parser.add_argument(
        "--project",
        type=str,
        default=os.environ.get("JIRA_PROJECT", "PCF"),
        help="Jira target project (default: %(default)s)",
    )

    subparsers = parser.add_subparsers(dest="command", title="available subcommands")
    subparser = subparsers.add_parser("build_failure", help="report build failure")
    subparser.add_argument(
        "--build-logs-dir",
        type=str,
        default=os.path.join(os.getcwd(), "logs"),
        help="Path to directory which contains build logs",
    )
    subparser.set_defaults(func=JiraHelper.build_failure)

    subparser = subparsers.add_parser("run_failure", help="report run failure")
    subparser.add_argument(
        "--target",
        type=str,
        help="Target on which runtime testing happened",
    )
    subparser.set_defaults(func=JiraHelper.run_failure)

    args = parser.parse_args()

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    if not args.command:
        print("command is missing")
        exit(1)

    prpl_jira = JiraHelper(args)
    args.func(prpl_jira)


if __name__ == "__main__":
    main()
.gitlab/scripts/requirements.txt (new file, 16 lines)
@@ -0,0 +1,16 @@
jira
pyyaml

# labgrid
attrs==19.3.0
jinja2==2.10.3
pexpect==4.7
https://github.com/labgrid-project/pyserial/archive/v3.4.0.1.zip#egg=pyserial
pytest==5.3.1
pyudev==0.21.0
requests==2.22.0
xmodem==0.4.5
autobahn==19.11.1
PyYAML==5.1.2
ansicolors==1.1.8
pyusb==1.0.2
.gitlab/scripts/testbed-cdrouter.py (new executable file, 433 lines; listing truncated below)
@@ -0,0 +1,433 @@
#!/usr/bin/env python3

import re
import argparse
import json
import os
import sys
import time
import logging
import humanize

from types import SimpleNamespace
from http.client import RemoteDisconnected
from requests.exceptions import ConnectionError

from cdrouter import CDRouter
from cdrouter.jobs import Job, Options
from cdrouter.configs import Config
from cdrouter.cdr_error import CDRouterError


class OpenWrtSystemInfo:
    def __init__(self, filename):
        with open(filename, "r") as read_file:
            self.data = json.load(read_file)

    @property
    def kernel(self):
        return self.data["kernel"]

    @property
    def board_name(self):
        board_name = self.data["board_name"]
        fixup = {
            "EASY350 ANYWAN (GRX350) Axepoint Asurada model": "nec-wx3000hp",
            "EASY350 ANYWAN (GRX350) Main model": "nec-wx3000hp",
        }
        board_name = fixup.get(board_name, board_name)
        return board_name.replace(",", "-")

    @property
    def target(self):
        return self.data["release"]["target"]

    @property
    def distribution(self):
        return self.data["release"]["distribution"]

    @property
    def revision(self):
        return self.data["release"]["revision"]

    @property
    def version(self):
        return self.data["release"]["version"]

    def sanitize(self, s):
        return s.replace("+", "-").replace("/", "-")

    def as_tags(self):
        return self.sanitize(
            "kernel_{},board_{},target_{},revision_{},version_{},distro_{}".format(
                self.kernel,
                self.board_name,
                self.target,
                self.revision,
                self.version,
                self.distribution,
            )
        )


class TestbedCDRouter:
    def __init__(self, args):
        self.args = args
        self.configs_path = os.path.join(self.args.root_dir, "configurations")
        self.packages_path = os.path.join(self.args.root_dir, "packages")

    def connect(self):
        api_token = os.getenv("CDROUTER_API_TOKEN")
        api_url = os.getenv("CDROUTER_API_URL")

        if not api_token or not api_url:
            logging.error("API token or URL is missing")
            exit(1)

        logging.debug("Connecting to {}".format(api_url))
        self.cdr = CDRouter(api_url, token=api_token)

    def job_launch(self):
        p = self.cdr.packages.get_by_name(self.args.package_name)
        logging.info(
            "Using test package '{name}' with id {id} and {test_count} tests defined.".format(
                name=p.name, id=p.id, test_count=p.test_count
            )
        )

        p = self.cdr.packages.get(p.id)
        config = self.cdr.configs.get_by_name(self.args.configuration)
        device = self.cdr.devices.get_by_name(self.args.device)
        if config:
            p.config_id = config.id
        if device:
            p.device_id = device.id
        if config or device:
            self.cdr.packages.edit(p)

        a = self.cdr.packages.analyze(p.id)
        self.job_total = a.run_count
        logging.info(
            "Package '{name}' will run {run_count} tests and skip {skipped_count} tests.".format(
                name=p.name, run_count=a.run_count, skipped_count=a.skipped_count
            )
        )

        for test in a.skipped_tests:
            logging.info(
                "Skipping test '{synopsis}' due to '{skip_name}'.".format(
                    synopsis=test.synopsis, skip_name=test.skip_name
                )
            )

        tags = None
        if self.args.system_info:
            tags = OpenWrtSystemInfo(self.args.system_info).as_tags().split(",")

        if self.args.tags:
            tags += self.args.tags.split(",")

        options = Options(tags=tags)
        job = Job(package_id=p.id, options=options)
        job = self.cdr.jobs.launch(job)
        while job.result_id is None:
            time.sleep(1)
            job = self.cdr.jobs.get(job.id)

        self.job = job

    def job_progress(self):
        current = None
        job = self.cdr.jobs.get(self.job.id)
        while job.status == "running":

            updates = self.cdr.results.updates(job.result_id)
            running = updates.running
            progress = updates.progress

            if not running or not progress:
                job = self.cdr.jobs.get(self.job.id)
                continue

            description = running.description
            if current == description:
                job = self.cdr.jobs.get(self.job.id)
                continue

            current = description

            logging.info(
                "Running test {finished}/{total} ({percent}%) '{description}'".format(
                    description=description,
                    percent=progress.progress,
                    finished=progress.finished,
                    total=self.job_total,
                )
            )

            time.sleep(5)
            job = self.cdr.jobs.get(self.job.id)

    def job_result(self, job_id=None):
        job_id = job_id or self.job.result_id
        r = self.cdr.results.get(job_id)

        buf, filename = self.cdr.results.download_logdir_archive(job_id, format="tgz")
        logging.info("Exporting logdir archive {}".format(filename))
        with open(filename, "wb") as f:
            f.write(buf.getvalue())

        logging.info(
            "Test job finished as '{}' after {}.".format(
                r.status, humanize.naturaldelta(r.duration)
            )
        )
        logging.info(
            "Run {} tests, which {} failed and {} passed.".format(
                r.tests, r.fail, r.passed
            )
        )

        if r.fail == 0 and r.status == "completed":
            logging.info("Success!")
            exit(0)

        if r.fail == 0:
            exit(1)

        logging.error("{:=^50}".format(" [ FAILED TESTS ] "))
        for test in self.cdr.tests.iter_list(r.id, filter=["result=fail"]):
            logging.error("{} ({})".format(test.description, test.result))

        exit(1)

    def package_run(self):
        self.connect()
        self.job_launch()
        self.job_progress()
        self.job_result()

    def netif_available(self, name):
        for netif in self.cdr.system.interfaces():
            if netif.name == name:
                return True

        return False

    def wait_for_netif(self):
        self.connect()

        name = self.args.name
        timeout = time.time() + self.args.timeout

        while True:
            if self.netif_available(name):
                logging.info("Interface {} is available.".format(name))
                exit(0)

            if time.time() > timeout:
                logging.error(
                    "Interface {} is not available after {}".format(
                        name, humanize.naturaldelta(self.args.timeout)
                    )
                )
                exit(1)

            time.sleep(5)

    def package_stop(self):
        self.connect()

        f = ["status~(running|paused)"]
        pkg_name = self.args.package_name
        if pkg_name:
            f.append("package_name={}".format(pkg_name))

        for r in self.cdr.results.iter_list(filter=f):
            self.cdr.results.stop(r.id)
            logging.info(
                "Stopped '{}' package which was {}".format(r.package_name, r.status)
            )

    def package_export(self):
        self.connect()
        name = self.args.name
        p = self.cdr.packages.get_by_name(name)
        buf, filename = self.cdr.packages.export(p.id)
        filename = "{}.gz".format(self.args.filename or name)
        self.file_save(self.packages_path, buf.getvalue(), filename)

    def package_import(self):
        self.connect()
        archive = os.path.join(self.packages_path, self.args.filename)

        with open(archive, "rb+") as fd:
            si = self.cdr.imports.stage_import_from_file(fd)

        filename = re.sub(r"\.gz$", "", self.args.filename)
        name = self.args.name or filename

        req = self.cdr.imports.get_commit_request(si.id)
        for id in req.packages:
            p = req.packages[id]
            p.name = name
            logging.debug("Going to import package '{}'".format(name))
            p.should_import = True

        resp = self.cdr.imports.commit(si.id, req)
        for id in resp.packages:
            r = resp.packages[id].response
            if not r.imported:
                logging.error("Import of '{}' failed: {}".format(r.name, r.message))
                exit(1)

            logging.info(
                "Imported package '{}' from '{}'".format(r.name, self.args.filename)
            )

    def file_save(self, path, content, filename=None):
        filename = filename or self.args.filename
        os.makedirs(path, exist_ok=True)
        dest = os.path.join(path, filename)
        with open(dest, "wb") as f:
            f.write(content)

    def config_export(self):
        self.connect()
        c = self.cdr.configs.get_by_name(self.args.name)
        content = self.cdr.configs.get_plaintext(c.id)
        self.file_save(self.configs_path, content.encode())

    def file_content(self, path, filename=None):
        content = None
        filename = filename or self.args.filename
        src = os.path.join(path, filename)
        with open(src, "rb") as f:
            content = f.read()
        return content

    def config_check(self):
        self.connect()
        content = self.file_content(self.configs_path)
        check = self.cdr.configs.check_config(content)
        if not check.errors:
            logging.info("OK, no errors!")
            exit(0)

        for error in check.errors:
            logging.error(
                "{}: {}: {}".format(
                    self.args.filename, ",".join(error.lines), error.error
                )
            )

        exit(1)

    def config_import(self):
        self.connect()
        name = self.args.name or self.args.filename

        try:
            c = self.cdr.configs.get_by_name(name)
            self.cdr.configs.delete(c.id)
            logging.debug("Deleted already existing config '{}'".format(c.name))
        except CDRouterError:
            pass

        content = self.file_content(self.configs_path)
        config = Config(contents=content, name=name)
        self.cdr.configs.create(config)
        logging.info("Imported config '{}' from '{}'".format(name, self.args.filename))


def main():
    logging.basicConfig(
        level=logging.INFO, format="%(levelname)7s: %(message)s", stream=sys.stderr
    )

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r",
        "--root-dir",
        type=str,
        default=os.environ.get("TB_CDROUTER_ROOT", ".testbed/cdrouter"),
        help="CDRouter root directory (default: %(default)s)",
    )
    parser.add_argument("-d", "--debug", action="store_true", help="enable debug mode")

    subparsers = parser.add_subparsers(dest="command", title="available subcommands")
    subparser = subparsers.add_parser("package_run", help="run package")
    subparser.add_argument("package_name", help="name of the testing package")
    subparser.add_argument("-t", "--tags", help="additional tags for the result")
    subparser.add_argument("-d", "--device", help="device used for testing")
    subparser.add_argument(
        "-c", "--configuration", help="configuration used for testing"
    )
    subparser.add_argument(
        "-s",
        "--system-info",
        help="JSON file with system information for additional tags",
    )
    subparser.set_defaults(func=TestbedCDRouter.package_run)
|
||||
|
||||
subparser = subparsers.add_parser(
|
||||
"package_stop", help="stop running/paused package"
|
||||
)
|
||||
subparser.add_argument(
|
||||
"-p", "--package-name", help="name of the package (default: any)"
|
||||
)
|
||||
subparser.set_defaults(func=TestbedCDRouter.package_stop)
|
||||
|
||||
subparser = subparsers.add_parser("package_export", help="export package")
|
||||
subparser.add_argument("name", help="package name")
|
||||
subparser.add_argument(
|
||||
"-f", "--filename", help="destination filename (default: name)"
|
||||
)
|
||||
subparser.set_defaults(func=TestbedCDRouter.package_export)
|
||||
|
||||
subparser = subparsers.add_parser("package_import", help="package configuration")
|
||||
subparser.add_argument("filename", help="package filename")
|
||||
subparser.add_argument("-n", "--name", help="package name, (default: filename)")
|
||||
subparser.set_defaults(func=TestbedCDRouter.package_import)
|
||||
|
||||
subparser = subparsers.add_parser("config_export", help="export configuration")
|
||||
subparser.add_argument("name", help="configuration name")
|
||||
subparser.add_argument("filename", help="destination filename")
|
||||
subparser.set_defaults(func=TestbedCDRouter.config_export)
|
||||
|
||||
subparser = subparsers.add_parser("config_check", help="check configuration")
|
||||
subparser.add_argument("filename", help="config filename")
|
||||
subparser.set_defaults(func=TestbedCDRouter.config_check)
|
||||
|
||||
subparser = subparsers.add_parser("config_import", help="import configuration")
|
||||
subparser.add_argument("filename", help="config filename")
|
||||
subparser.add_argument("-n", "--name", help="config name, (default: filename)")
|
||||
subparser.set_defaults(func=TestbedCDRouter.config_import)
|
||||
|
||||
subparser = subparsers.add_parser(
|
||||
"wait_for_netif", help="wait for network interface"
|
||||
)
|
||||
subparser.add_argument("name", help="interface name")
|
||||
subparser.add_argument(
|
||||
"-t",
|
||||
"--timeout",
|
||||
type=int,
|
||||
default=60,
|
||||
help="wait duration in seconds (default: %(default)s)",
|
||||
)
|
||||
subparser.set_defaults(func=TestbedCDRouter.wait_for_netif)
|
||||
|
||||
args = parser.parse_args()
|
||||
if args.debug:
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
|
||||
if not args.command:
|
||||
print("command is missing")
|
||||
exit(1)
|
||||
|
||||
cdr = TestbedCDRouter(args)
|
||||
args.func(cdr)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
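The CDRouter REST client driven above (`self.cdr.jobs`, `self.cdr.results`, …) can also be used outside this wrapper when debugging a testbed. A minimal sketch, assuming the same `cdrouter` Python client and placeholder `CDROUTER_URL`/`CDROUTER_API_TOKEN` environment variables (neither name is defined by the script above):

```python
# Minimal sketch: stop every running/paused CDRouter package, mirroring package_stop() above.
# CDROUTER_URL and CDROUTER_API_TOKEN are placeholder names, not part of the CI setup.
import os

from cdrouter import CDRouter

cdr = CDRouter(os.environ["CDROUTER_URL"], token=os.environ["CDROUTER_API_TOKEN"])

# Same filter expression that package_stop() builds.
for result in cdr.results.iter_list(filter=["status~(running|paused)"]):
    cdr.results.stop(result.id)
    print("Stopped '{}' which was {}".format(result.package_name, result.status))
```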
140
.gitlab/scripts/testbed-device.py
Executable file
@@ -0,0 +1,140 @@
#!/usr/bin/env python3

import argparse
import os
import sys
import logging

from labgrid import Environment, StepReporter
from labgrid.consoleloggingreporter import ConsoleLoggingReporter


class TestbedDevice:
    def __init__(self, args):
        self.args = args
        self.env = Environment(config_file=self.args.config)
        ConsoleLoggingReporter.start(args.console_logpath)
        self.target = self.env.get_target(args.target)

    def boot_into(self):
        strategy = self.target.get_driver("UBootStrategy")
        dest = self.args.destination
        if dest == "shell":
            strategy.transition("shell")
        if dest == "bootloader":
            strategy.transition("uboot")

    def power(self):
        power = self.target.get_driver("PowerProtocol")
        action = self.args.action
        if action == "on":
            power.on()
        if action == "off":
            power.off()
        if action == "cycle":
            power.cycle()

    def check_network(self):
        host = self.args.remote_host
        network = self.args.network
        shell = self.target.get_driver("ShellDriver")
        shell.wait_for(
            'ifstatus {} | jsonfilter -qe "@.up" || true'.format(network), "true", 60.0
        )

        shell.wait_for("ping -c1 {} || true".format(host), ", 0% packet loss", 90.0)


def main():
    logging.basicConfig(
        level=logging.INFO, format="%(levelname)7s: %(message)s", stream=sys.stderr
    )

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default=os.environ.get("TB_CONFIG", ".testbed/labgrid/default.yaml"),
        help="config file (default: %(default)s)",
    )
    parser.add_argument(
        "-t",
        "--target",
        type=str,
        default=os.environ.get("TB_TARGET", None),
        help="target device",
    )
    parser.add_argument(
        "-o",
        "--console-logpath",
        type=str,
        default=os.environ.get("TB_CONSOLE_LOGPATH", os.getcwd()),
        help="path for console logfile (default: %(default)s)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=int(os.environ.get("TB_VERBOSE", 0)),
        help="enable verbose mode",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        default=os.environ.get("TB_DEBUG"),
        help="enable debug mode",
    )

    subparsers = parser.add_subparsers(dest="command", title="available subcommands")

    subparser = subparsers.add_parser("power", help="control target power")
    subparser.add_argument(
        "action", choices=["on", "off", "cycle"], help="power on/off/cycle target"
    )
    subparser.set_defaults(func=TestbedDevice.power)

    subparser = subparsers.add_parser("boot_into", help="boot target into console")
    subparser.add_argument(
        "destination",
        choices=["shell", "bootloader"],
        help="boot target either into system shell or bootloader console",
    )
    subparser.set_defaults(func=TestbedDevice.boot_into)

    subparser = subparsers.add_parser(
        "check_network", help="ensure that network is usable"
    )
    subparser.add_argument(
        "-r",
        "--remote-host",
        default="192.168.1.2",
        help="remote host used for ping check (default: %(default)s)",
    )
    subparser.add_argument(
        "-n", "--network", default="lan", help="target network (default: %(default)s)"
    )
    subparser.set_defaults(func=TestbedDevice.check_network)

    args = parser.parse_args()
    if args.verbose >= 1:
        StepReporter.start()

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    if not args.target:
        print("target device name is mandatory")
        exit(1)

    if not args.command:
        print("command is missing")
        exit(1)

    device = TestbedDevice(args)
    args.func(device)


if __name__ == "__main__":
    main()
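For one-off experiments the same labgrid calls can be driven from an interactive session instead of this CLI. A minimal sketch, using only the drivers the script above requests; the config path and target name are examples, not values mandated by the script:

```python
# Minimal sketch: power-cycle a testbed device with labgrid, mirroring TestbedDevice.power().
# Config file path and target name below are illustrative.
from labgrid import Environment

env = Environment(config_file=".testbed/labgrid/default.yaml")
target = env.get_target("glinet-b1300-initramfs")

power = target.get_driver("PowerProtocol")
power.cycle()  # the same call the "power cycle" subcommand issues
```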
174
.gitlab/sdk.yml
Normal file
@@ -0,0 +1,174 @@
variables:
  CI_DESIGNATED_BRANCH: prplos

stages:
  - generate
  - build

.build feed with SDK:
  stage: build
  image: $CI_SDK_IMAGE
  variables:
    CI_SDK_TOPDIR: /home/builder
    CI_SDK_INSTALL_FEEDS: base packages luci routing
    CI_SDK_BUILD_CONFIG: >
      +BUILD_LOG -AUTOREMOVE -ALL -ALL_NONSHARED -ALL_KMODS -SIGNED_PACKAGES

  before_script:
    - set -o pipefail

    - cd $CI_SDK_TOPDIR
    - mkdir logs

    - sed -i 's;https://git.openwrt.org;https://gitlab.com/openwrt;' feeds.conf.default
    - echo "src-include defaults feeds.conf.default" > feeds.conf
    - echo "src-link ci $CI_PROJECT_DIR" >> feeds.conf

    - >
      for feed in $CI_SDK_INSTALL_FEEDS ci; do
        sleep_interval="$(( (RANDOM % 60) + 1 ))"
        echo "Waiting for $sleep_interval seconds..."
        sleep "${sleep_interval}s"

        ./scripts/feeds update "$feed" ;
        ./scripts/feeds install -a -p "$feed" ;
      done 2>&1 | tee logs/build.log

    - sync
    - sleep 1
    - sync

    - >
      for option in $CI_SDK_BUILD_CONFIG $CI_SDK_BUILD_CONFIG_EXTRA; do
        echo "$option" | sed -E "s/^\+(.*)$/CONFIG_\1=y/;s/^\-(.*)$/CONFIG_\1=n/" >> .config
      done
    - cat .config | tee --append logs/build.log
    - make defconfig | tee --append logs/build.log
    - ./scripts/diffconfig.sh | tee --append logs/build.log

    - sync
    - sleep 1
    - sync

  script:
    - >
      test -n "$CI_SDK_BEFORE_COMPILE_COMMAND" &&
      echo "Running CI_SDK_BEFORE_COMPILE_COMMAND=$CI_SDK_BEFORE_COMPILE_COMMAND" &&
      /bin/sh -c "$CI_SDK_BEFORE_COMPILE_COMMAND"

    - >
      for package in $CI_SDK_BUILD_PACKAGES; do
        make V=sc package/$package/{download,check} FIXUP=1
      done 2>&1 | tee --append logs/build.log

    - >
      topdir=$(pwd);
      for feed in $(find feeds -follow -name .git); do
        pushd $(dirname $feed) > /dev/null; git diff-index --exit-code HEAD || {
          ret=$?
          echo "Feed $(dirname $feed) packages integrity issues, please check feed-packages-hash-issues.patch from artifacts"
          git diff | tee $topdir/feed-packages-hash-issues.patch
          exit $ret
        }
        popd > /dev/null
      done

    - >
      for package in $CI_SDK_BUILD_PACKAGES; do
        make -j ${CI_SDK_BUILD_PARALLEL:-$(nproc)} package/$package/compile
      done 2>&1 | tee --append logs/build.log

  after_script:
    - cp -R "$CI_SDK_TOPDIR/logs" $CI_PROJECT_DIR
    - >
      if grep -qr 'make\[[[:digit:]]\].*Error [[:digit:]]$' logs; then
        printf "\n====== Showing Make errors found in the log files ======";
        for file in $(grep -lr 'make\[[[:digit:]]\].*Error [[:digit:]]$' logs); do
          printf "\n====== Make errors from $CI_JOB_URL/artifacts/file/$file ======\n" ;
          grep -r -C5 'make\[[[:digit:]]\].*Error [[:digit:]]$' $file ;
        done
      fi

  artifacts:
    expire_in: 1 month
    when: always
    paths:
      - logs/
      - ./feed-packages-hash-issues.patch

.generate SDK package build jobs:
  stage: generate
  image: alpine

  variables:
    CI_SDK_BUILD_PACKAGES: |
      please-provide
    CI_SDK_PACKAGE_JOBS_TEMPLATE: |
      include:
        - remote: https://gitlab.com/prpl-foundation/prplos/prplos/-/raw/$CI_DESIGNATED_BRANCH/.gitlab/sdk.yml
      {% for sdk in env['CI_SDK_TARGETS'].rstrip().split("\n") %}
      {% for package in env['CI_SDK_BUILD_PACKAGES'].rstrip().split("\n") %}
      build {{ package | trim }} with {{ sdk | trim }} SDK:
        extends: .build feed with {{ sdk | trim }} SDK
        variables:
          CI_SDK_BUILD_PACKAGES: {{ package | trim }}
          CI_SDK_BUILD_CONFIG_EXTRA: +PACKAGE_{{ package | trim }}
      {% endfor %}
      {% endfor %}

  before_script:
    - apk add python3 py3-pip
    - pip3 install jinja2

  script:
    - |
      echo "$CI_SDK_PACKAGE_JOBS_TEMPLATE" | python3 -c '
      import os
      import sys
      import jinja2
      sys.stdout.write(
          jinja2.Template(sys.stdin.read()
      ).render(env=os.environ))' > sdk-package-jobs.yml
    - cat sdk-package-jobs.yml

  artifacts:
    paths:
      - sdk-package-jobs.yml

.execute SDK package build jobs:
  stage: build
  needs:
    - generate

  trigger:
    include:
      - artifact: sdk-package-jobs.yml
        job: generate
    strategy: depend

.build feed with OpenWrt SDK:
  extends: .build feed with SDK
  variables:
    CI_SDK_BEFORE_COMPILE_COMMAND: sudo apt-get update; sudo apt-get install -y python-yaml python3-yaml
    CI_SDK_TOPDIR: /home/build/openwrt

.build feed with intel_mips-xrx500 SDK:
  extends: .build feed with SDK
  variables:
    CI_SDK_IMAGE: registry.gitlab.com/prpl-foundation/prplos/prplos/$CI_DESIGNATED_BRANCH/sdk-intel_mips-xrx500:latest
    CI_SDK_INSTALL_FEEDS: base packages luci routing feed_intel

.build feed with ipq40xx-generic SDK:
  extends: .build feed with SDK
  variables:
    CI_SDK_IMAGE: registry.gitlab.com/prpl-foundation/prplos/prplos/$CI_DESIGNATED_BRANCH/sdk-ipq40xx-generic:latest

.build feed with mvebu-cortexa9 SDK:
  extends: .build feed with SDK
  variables:
    CI_SDK_IMAGE: registry.gitlab.com/prpl-foundation/prplos/prplos/$CI_DESIGNATED_BRANCH/sdk-mvebu-cortexa9:latest

.build feed with ath79-generic-19.07.7 SDK:
  extends: .build feed with OpenWrt SDK
  variables:
    CI_SDK_IMAGE: openwrtorg/sdk:ath79-generic-19.07.7
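The child-pipeline YAML is produced by rendering `CI_SDK_PACKAGE_JOBS_TEMPLATE` with Jinja2 against the job's environment. A standalone sketch of that expansion for local debugging; the target and package names below are examples only:

```python
# Minimal sketch: expand the SDK package-job template the same way the "generate" job does.
# The CI_SDK_TARGETS / CI_SDK_BUILD_PACKAGES values are illustrative.
import os

import jinja2

os.environ.setdefault("CI_SDK_TARGETS", "ipq40xx-generic\nmvebu-cortexa9")
os.environ.setdefault("CI_SDK_BUILD_PACKAGES", "libubox\nnetifd")

template = """\
{% for sdk in env['CI_SDK_TARGETS'].rstrip().split("\\n") %}
{% for package in env['CI_SDK_BUILD_PACKAGES'].rstrip().split("\\n") %}
build {{ package | trim }} with {{ sdk | trim }} SDK:
  extends: .build feed with {{ sdk | trim }} SDK
{% endfor %}
{% endfor %}
"""

# One job per (package, SDK) pair, exactly what the generated sdk-package-jobs.yml contains.
print(jinja2.Template(template).render(env=os.environ))
```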
130
.gitlab/testbed.yml
Normal file
@@ -0,0 +1,130 @@
.testbed:
  stage: run
  image: "$CI_REGISTRY_IMAGE/$CI_DESIGNATED_BRANCH/testbed:latest"
  rules:
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
      when: never
    - when: always
  variables:
    DUT_WAN_INTERFACE: eth1
    TESTBED_MNG_INTERFACE: enp1s0
    TESTBED_LAN_INTERFACE: enp2s0
    TESTBED_WAN_INTERFACE: enp3s0
    TESTBED_TFTP_PATH: /var/lib/tftpboot
    TFTP_IMAGE_PATH: bin/targets/$DUT_ARCH/$DUT_SUBTARGET
    TARGET_LAN_IP: 192.168.1.1
    TARGET_LAN_TEST_HOST: 192.168.1.2
    DUT_SLEEP_AFTER_BOOT: 90
    CRAM_REMOTE_COMMAND: ssh root@$TARGET_LAN_IP
    CRAM_TEST_SUITE: |
      .gitlab/tests/cram/generic/acceleration-plan-components
      .gitlab/tests/cram/$DUT_BOARD/acceleration-plan-components
      .gitlab/tests/cram/generic
      .gitlab/tests/cram/$DUT_BOARD

  before_script:
    - touch .run_failed

    - sudo ip link set $TESTBED_LAN_INTERFACE up 2> /dev/null
    - sudo ip link set $TESTBED_WAN_INTERFACE up 2> /dev/null
    - sleep 10

    - eval $(ssh-agent -s)
    - echo "$TESTBED_SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
    - mkdir -p ~/.ssh; chmod 700 ~/.ssh
    - ssh-keyscan $TESTBED_UART_RELAY_HOST > ~/.ssh/known_hosts 2> /dev/null
    - chmod 644 ~/.ssh/known_hosts

    - >
      if [ -n "$TFTP_IMAGE_DOWNLOAD_URL" ]; then
        echo "Downloading firmware image from $TFTP_IMAGE_DOWNLOAD_URL"
        curl --fail "$TFTP_IMAGE_DOWNLOAD_URL" > "$TESTBED_TFTP_PATH/$TFTP_IMAGE_FILENAME"
      else
        echo "Using firmware image $TFTP_IMAGE_PATH/$TFTP_IMAGE_FILENAME"
        cp "$TFTP_IMAGE_PATH/$TFTP_IMAGE_FILENAME" "$TESTBED_TFTP_PATH"
      fi
    - >
      if [ -n "$TFTP_IMAGE_UNPACK_COMMAND" ]; then
        echo "Running $TFTP_IMAGE_UNPACK_COMMAND"
        eval "$TFTP_IMAGE_UNPACK_COMMAND"
      fi
    - .gitlab/scripts/testbed-device.py --target $LABGRID_TARGET boot_into shell
    - >
      .gitlab/scripts/testbed-device.py
      --target $LABGRID_TARGET check_network
      --network lan
      --remote-host $TARGET_LAN_TEST_HOST

    - >
      retry_count=3;
      while [ $retry_count -gt 0 ]; do
        echo "Waiting for SSH availability on $TARGET_LAN_IP"
        ssh-keyscan "$TARGET_LAN_IP" 2>&1 | grep -q "$TARGET_LAN_IP" && break
        retry_count="$(( retry_count - 1 ))"
        sleep 5
      done

    - ssh-keyscan $TARGET_LAN_IP >> ~/.ssh/known_hosts 2> /dev/null
    - ssh root@$TARGET_LAN_IP "cat /var/log/messages" > syslog-$LABGRID_TARGET.txt 2> /dev/null || true
    - ssh root@$TARGET_LAN_IP "ubus call system board" | tee system-$LABGRID_TARGET.json

  script:
    - sleep $DUT_SLEEP_AFTER_BOOT
    - set -o pipefail
    - python3 -m cram --verbose $CRAM_TEST_SUITE $CRAM_TEST_SUITE_EXTRA | tee cram-result-$LABGRID_TARGET.txt
    - rm .run_failed

  after_script:
    - >
      [ "$TMATE_ENABLE_SHELL" = "YES" ] && {
        echo "Starting tmate session..."
        tmate -n "${TMATE_SESSION_TOKEN}-$CI_JOB_ID" -k "$TMATE_API_KEY" -F new "ssh root@$TARGET_LAN_IP" > /tmp/tmate-session.log 2>&1
        true
      }

    - eval $(ssh-agent -s)
    - echo "$TESTBED_SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
    - mkdir -p ~/.ssh; chmod 700 ~/.ssh
    - ssh-keyscan $TESTBED_UART_RELAY_HOST > ~/.ssh/known_hosts 2> /dev/null
    - chmod 644 ~/.ssh/known_hosts

    - ssh-keyscan $TARGET_LAN_IP >> ~/.ssh/known_hosts 2> /dev/null
    - >
      ssh root@$TARGET_LAN_IP exit && {
        ssh root@$TARGET_LAN_IP ps > processes-$LABGRID_TARGET.txt
        ssh root@$TARGET_LAN_IP dmesg > dmesg-$LABGRID_TARGET.txt
        ssh root@$TARGET_LAN_IP "cat /var/log/messages" > syslog-$LABGRID_TARGET.txt
        ssh root@$TARGET_LAN_IP opkg list > opkg_list-$LABGRID_TARGET.txt
        scp -r root@${TARGET_LAN_IP}:/etc etc > /dev/null
        scp -r root@${TARGET_LAN_IP}:/tmp/beerocks/logs prplmesh_beerocks_logs > /dev/null
      } || true

    - .gitlab/scripts/testbed-device.py --target $LABGRID_TARGET power off

    - mv console_$LABGRID_TARGET console_$LABGRID_TARGET.txt || true
    - >
      [ "$CI_COMMIT_BRANCH" = "$CI_DESIGNATED_BRANCH" ] && [ "$TMATE_ENABLE_SHELL" != "YES" ] && {
        test -f .run_failed &&
        .gitlab/scripts/prpl-jira.py run_failure --target $LABGRID_TARGET || true
      } || true

  artifacts:
    expire_in: 1 month
    when: always
    paths:
      - .gitlab/tests/cram/**/*.t.err
      - etc
      - prplmesh_beerocks_logs
      - processes-$LABGRID_TARGET.txt
      - dmesg-$LABGRID_TARGET.txt
      - syslog-$LABGRID_TARGET.txt
      - system-$LABGRID_TARGET.json
      - console_$LABGRID_TARGET.txt
      - cram-result-$LABGRID_TARGET.txt
      - opkg_list-$LABGRID_TARGET.txt

.testbed true.cz:
  extends: .testbed
  variables:
    TESTBED_UART_RELAY_HOST: uart-relay.testbed.vpn.true.cz
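The `ssh-keyscan` retry loop above is the only thing gating the later `ssh`/`scp` steps. The same wait can be expressed in a few lines of Python if it ever needs to move into one of the helper scripts; a sketch, assuming only that TCP port 22 on the DUT starts answering once the SSH daemon is up (host, retry count and delay are illustrative defaults):

```python
# Minimal sketch: wait until the DUT's SSH port answers, mirroring the ssh-keyscan retry loop.
import socket
import time


def wait_for_ssh(host="192.168.1.1", port=22, retries=3, delay=5):
    for _ in range(retries):
        try:
            with socket.create_connection((host, port), timeout=5):
                return True
        except OSError:
            time.sleep(delay)
    return False


if __name__ == "__main__":
    print("SSH reachable" if wait_for_ssh() else "SSH not reachable")
```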
10
.gitlab/testbed/glinet-b1300.yml
Normal file
@@ -0,0 +1,10 @@
.glinet-b1300 testbed:
  extends: .testbed true.cz
  variables:
    DUT_ARCH: ipq40xx
    DUT_SUBTARGET: generic
    DUT_BOARD: glinet-b1300
    LABGRID_TARGET: glinet-b1300-initramfs
    TFTP_IMAGE_FILENAME: openwrt-ipq40xx-generic-glinet_gl-b1300-initramfs-fit-uImage.itb
  tags:
    - dut-glinet-b1300
10
.gitlab/testbed/nec-wx3000hp.yml
Normal file
@@ -0,0 +1,10 @@
.nec-wx3000hp testbed:
  extends: .testbed true.cz
  variables:
    DUT_ARCH: intel_mips
    DUT_SUBTARGET: xrx500
    DUT_BOARD: nec-wx3000hp
    LABGRID_TARGET: nec-wx3000hp-nand
    TFTP_IMAGE_FILENAME: AX3000_1600_ETH_11AXUCI_ASURADA-squashfs-fullimage.img
  tags:
    - dut-nec-wx3000hp
11
.gitlab/testbed/turris-omnia.yml
Normal file
@@ -0,0 +1,11 @@
.turris-omnia testbed:
  extends: .testbed true.cz
  variables:
    DUT_ARCH: mvebu
    DUT_SUBTARGET: cortexa9
    DUT_BOARD: turris-omnia
    DUT_WAN_INTERFACE: eth2
    LABGRID_TARGET: turris-omnia-initramfs
    TFTP_IMAGE_FILENAME: openwrt-mvebu-cortexa9-cznic_turris-omnia-initramfs-kernel.bin
  tags:
    - dut-turris-omnia
12
.gitlab/tests/README.md
Normal file
@@ -0,0 +1,12 @@
# Content

This directory contains tests.

## cram

Contains [Cram](https://github.com/brodie/cram/)-based tests. [Cram](https://github.com/brodie/cram/) is a functional
testing framework for command line applications. [Cram](https://github.com/brodie/cram/) tests look like snippets of
interactive shell sessions. Cram runs each command and compares the command output in the test with the command's actual
output.

Tests in this directory should be run against a device, usually over the network via an SSH connection.
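Because the tests reach the DUT through `$CRAM_REMOTE_COMMAND`, they can also be run from any workstation with SSH access to the device. A sketch of a local invocation equivalent to what the CI job does; the paths and DUT address are examples:

```python
# Minimal sketch: run one cram test directory against a DUT over SSH, as the CI job does.
# The environment values below are examples; adjust them for your own testbed.
import os
import subprocess

env = dict(os.environ)
env["CRAM_REMOTE_COMMAND"] = "ssh root@192.168.1.1"
env["TARGET_LAN_IP"] = "192.168.1.1"

subprocess.run(
    ["python3", "-m", "cram", "--verbose", ".gitlab/tests/cram/generic"],
    env=env,
    check=True,
)
```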
19
.gitlab/tests/cram/README.md
Normal file
@@ -0,0 +1,19 @@
# Content

This directory contains Cram-based tests.

## generic

Contains tests which can be used on all devices.

## turris-omnia

Contains tests which can be used against the `Turris Omnia` device.

## glinet-b1300

Contains tests which can be used against the `Gl.iNet B1300` device.

## nec-wx3000hp

Contains tests which can be used against the `NEC WX3000HP` device.
@@ -0,0 +1,41 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Check that a client is able to get a new lease:
|
||||
|
||||
$ sudo nmap --script broadcast-dhcp-discover -e $TESTBED_LAN_INTERFACE 2>&1 | egrep '(Server|Router|Subnet)' | sort
|
||||
| Domain Name Server: 192.168.1.1
|
||||
| Router: 192.168.1.1
|
||||
| Server Identifier: 192.168.1.1
|
||||
| Subnet Mask: 255.255.255.0
|
||||
|
||||
Remove dhcp-server rule:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_Service.dhcp-server-" > /dev/null; sleep .5
|
||||
|
||||
Check that a client is unable to get a new lease:
|
||||
|
||||
$ sudo nmap --script broadcast-dhcp-discover -e $TESTBED_LAN_INTERFACE 2>&1 | egrep '(Server|Router|Subnet)'
|
||||
[1]
|
||||
|
||||
Add back firewall rule for dhcp-server access from LAN:
|
||||
|
||||
$ printf "\
|
||||
> ubus-cli Firewall.X_Prpl_Service+{Alias='dhcp-server'}
|
||||
> ubus-cli Firewall.X_Prpl_Service.dhcp-server.Action=Accept
|
||||
> ubus-cli Firewall.X_Prpl_Service.dhcp-server.DestinationPort=67
|
||||
> ubus-cli Firewall.X_Prpl_Service.dhcp-server.IPVersion=4
|
||||
> ubus-cli Firewall.X_Prpl_Service.dhcp-server.Interface=br-lan
|
||||
> ubus-cli Firewall.X_Prpl_Service.dhcp-server.Protocol=UDP
|
||||
> ubus-cli Firewall.X_Prpl_Service.dhcp-server.Enable=1
|
||||
> " > /tmp/cram
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null
|
||||
|
||||
Check that a client is able to get a new lease again:
|
||||
|
||||
$ sudo nmap --script broadcast-dhcp-discover -e $TESTBED_LAN_INTERFACE 2>&1 | egrep '(Server|Router|Subnet)' | sort
|
||||
| Domain Name Server: 192.168.1.1
|
||||
| Router: 192.168.1.1
|
||||
| Server Identifier: 192.168.1.1
|
||||
| Subnet Mask: 255.255.255.0
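The same nmap output can be checked programmatically when debugging a failing run. A sketch that parses the broadcast-dhcp-discover fields the test greps for; the interface name and the expected values are the ones used above, and the script assumes nmap is installed and run with sufficient privileges:

```python
# Minimal sketch: run nmap's broadcast-dhcp-discover script and check the offered lease,
# mirroring the egrep/sort assertions in the cram test above.
import re
import subprocess

out = subprocess.run(
    ["nmap", "--script", "broadcast-dhcp-discover", "-e", "enp2s0"],
    capture_output=True, text=True, check=True,
).stdout

fields = dict(re.findall(r"\|\s+([\w ]+): ([\d.]+)", out))
assert fields.get("Server Identifier") == "192.168.1.1"
assert fields.get("Router") == "192.168.1.1"
assert fields.get("Subnet Mask") == "255.255.255.0"
```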
|
||||
@@ -0,0 +1,26 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Check that HTTP from LAN is allowed by default:
|
||||
|
||||
$ R "iptables -L INPUT_Services -v -n | grep 'br-lan.*dpt:80$'"
|
||||
* ACCEPT tcp -- br-lan * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 (glob)
|
||||
|
||||
Disable firewall rule for HTTP access from LAN:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_Service.http.Enable=0" > /dev/null; sleep .5
|
||||
|
||||
Check that HTTP from LAN is forbidden:
|
||||
|
||||
$ R "iptables -L INPUT_Services -v -n | grep 'br-lan.*dpt:80$'"
|
||||
[1]
|
||||
|
||||
Enable firewall rule for HTTP access from LAN:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_Service.http.Enable=1" > /dev/null; sleep .5
|
||||
|
||||
Check that HTTP from LAN is allowed again:
|
||||
|
||||
$ R "iptables -L INPUT_Services -v -n | grep 'br-lan.*dpt:80$'"
|
||||
* ACCEPT tcp -- br-lan * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 (glob)
|
||||
@@ -0,0 +1,65 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Configure port trigger rule:
|
||||
|
||||
$ printf "\
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger+{Alias='test'}
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger.test.Port=6000
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger.test.Protocol="6"
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger.test.Timer=7
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger.test.Rule+{Alias='test-rule'}
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger.test.Rule.test-rule.Port=8000
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger.test.Rule.test-rule.Protocol="17"
|
||||
> ubus-cli Firewall.X_Prpl_PortTrigger.test.Enable=1
|
||||
> " > /tmp/cram
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null; sleep 1
|
||||
|
||||
Check that there is NFQUEUE rule:
|
||||
|
||||
$ R "iptables -L FORWARD_PortTrigger -n | grep 6000"
|
||||
NFQUEUE tcp -- 0.0.0.0/0 0.0.0.0/0 tcp dpt:6000[8 bytes of unknown target data]
|
||||
|
||||
Disable port trigger rule:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_PortTrigger.test.Enable=0" > /dev/null; sleep 1
|
||||
|
||||
Check that there is no NFQUEUE rule:
|
||||
|
||||
$ R "iptables -L FORWARD_PortTrigger -n | grep 6000"
|
||||
[1]
|
||||
|
||||
Enable port trigger rule:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_PortTrigger.test.Enable=1" > /dev/null; sleep 1
|
||||
|
||||
Add route to 10.10.10.10 via TARGET_LAN_IP:
|
||||
|
||||
$ sudo ip route add 10.10.10.10/32 via $TARGET_LAN_IP dev $TESTBED_LAN_INTERFACE 2> /dev/null
|
||||
|
||||
Trigger port rule:
|
||||
|
||||
$ curl --silent --output /dev/null --max-time 2 http://10.10.10.10:6000 ; sleep 3
|
||||
|
||||
Check that additional rules have been created:
|
||||
|
||||
$ R "iptables -L FORWARD_PortTrigger -n | grep 8000 | sort"
|
||||
ACCEPT udp -- 0.0.0.0/0 192.168.1.2 udp dpt:8000
|
||||
ACCEPT udp -- 192.168.1.2 0.0.0.0/0 udp spt:8000
|
||||
|
||||
Check that the owner IPAddress was correctly set:
|
||||
|
||||
$ R "ubus call Firewall.X_Prpl_PortTrigger _get '{\"rel_path\":\"test.Stats.IPAddress\"}' | jsonfilter -e @[*].IPAddress"
|
||||
192.168.1.2
|
||||
|
||||
Wait for expiration of port trigger and check that everything is disabled:
|
||||
|
||||
$ sleep 6
|
||||
$ R "iptables -L FORWARD_PortTrigger -n | grep 8000"
|
||||
[1]
|
||||
|
||||
Remove port trigger rule and route:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_PortTrigger.test-" > /dev/null
|
||||
$ sudo ip route del 10.10.10.10/32 via $TARGET_LAN_IP dev $TESTBED_LAN_INTERFACE 2> /dev/null
|
||||
@@ -0,0 +1,43 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Configure MAC filter rule:
|
||||
|
||||
$ printf "\
|
||||
> ubus-cli Firewall.Chain.L_Low.Rule+{Alias='test'}
|
||||
> ubus-cli Firewall.Chain.L_Low.Rule.test.X_Prpl_SourceMAC='AA:BB:CC:DD:EE:FF'
|
||||
> ubus-cli Firewall.Chain.L_Low.Rule.test.Target=Accept
|
||||
> ubus-cli Firewall.Chain.L_Low.Rule.test.IPVersion=4
|
||||
> ubus-cli Firewall.Chain.L_Low.Rule.test.Enable=1
|
||||
> " > /tmp/cram
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null; sleep 1
|
||||
|
||||
Check that the correct rule is present:
|
||||
|
||||
$ R "iptables -L FORWARD_L_Low -n | grep AA:BB:CC"
|
||||
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 MAC AA:BB:CC:DD:EE:FF
|
||||
|
||||
Disable MAC filter rule:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.Chain.L_Low.Rule.test.Enable=0" > /dev/null; sleep 1
|
||||
|
||||
Check that the rule is missing:
|
||||
|
||||
$ R "iptables -L FORWARD_L_Low -n | grep AA:BB:CC"
|
||||
[1]
|
||||
|
||||
Enable MAC filter rule:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.Chain.L_Low.Rule.test.Enable=1" > /dev/null; sleep 1
|
||||
|
||||
Check that the rule is back again:
|
||||
|
||||
$ R "iptables -L FORWARD_L_Low -n | grep AA:BB:CC"
|
||||
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 MAC AA:BB:CC:DD:EE:FF
|
||||
|
||||
Remove the rule and check that it is gone:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.Chain.L_Low.Rule.test-" > /dev/null; sleep 1
|
||||
$ R "iptables -L FORWARD_L_Low -n | grep AA:BB:CC"
|
||||
[1]
|
||||
@@ -0,0 +1,27 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Set firewall level to High:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.Config=High" > /dev/null; sleep 1
|
||||
|
||||
Check that it is set properly:
|
||||
|
||||
$ R "iptables -n -L FORWARD_L_Low | grep references"
|
||||
Chain FORWARD_L_Low (0 references)
|
||||
|
||||
$ R "iptables -n -L FORWARD_L_High | grep references"
|
||||
Chain FORWARD_L_High (2 references)
|
||||
|
||||
Set firewall level to Low:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.Config=Low" > /dev/null; sleep 1
|
||||
|
||||
Check that it is set properly:
|
||||
|
||||
$ R "iptables -n -L FORWARD_L_Low | grep references"
|
||||
Chain FORWARD_L_Low (2 references)
|
||||
|
||||
$ R "iptables -n -L FORWARD_L_High | grep references"
|
||||
Chain FORWARD_L_High (0 references)
|
||||
@@ -0,0 +1,42 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Create portmapping:
|
||||
|
||||
$ printf "\
|
||||
> ubus-cli NAT.PortMapping+{Alias='testing'}
|
||||
> ubus-cli NAT.PortMapping.testing.ExternalPort=5000
|
||||
> ubus-cli NAT.PortMapping.testing.Interface=wan
|
||||
> ubus-cli NAT.PortMapping.testing.InternalClient=$TARGET_LAN_TEST_HOST
|
||||
> ubus-cli NAT.PortMapping.testing.InternalPort=12345
|
||||
> ubus-cli NAT.PortMapping.testing.Protocol=TCP
|
||||
> ubus-cli NAT.PortMapping.testing.Enable=1
|
||||
> " > /tmp/cram
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null
|
||||
|
||||
Check that portmapping works:
|
||||
|
||||
$ sudo tcpdump -c 1 -l -n -i $TESTBED_LAN_INTERFACE tcp port 12345 > /tmp/tcpdump-works 2> /dev/null &
|
||||
$ sleep 1
|
||||
|
||||
$ echo foo | nc -w 1 10.0.0.2 5000 > /dev/null 2>&1
|
||||
[1]
|
||||
|
||||
$ grep -c 12345 /tmp/tcpdump-works
|
||||
1
|
||||
|
||||
Remove portmapping:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli NAT.PortMapping.testing-" > /dev/null; sleep .5
|
||||
|
||||
Check that portmapping doesn't work:
|
||||
|
||||
$ sudo tcpdump -c 1 -l -n -i $TESTBED_LAN_INTERFACE tcp port 12345 > /tmp/tcpdump 2> /dev/null &
|
||||
$ sleep 1
|
||||
|
||||
$ echo foo | nc -w 1 10.0.0.2 5000 > /dev/null 2>&1
|
||||
[1]
|
||||
|
||||
$ wc -l /tmp/tcpdump
|
||||
0 /tmp/tcpdump
|
||||
@@ -0,0 +1,56 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Create testing user:
|
||||
|
||||
$ printf "\
|
||||
> ubus-cli Users.Group.+{GroupID=999,Groupname='testgroup',Alias='testgroup'}
|
||||
> ubus-cli Users.SupportedShell.+{Name='/bin/ash',Alias='ash',Enable='true'}
|
||||
> ubus-cli Users.User.+{UserID=666,Username='testuser',Alias='testuser',GroupParticipation='Users.Group.testgroup.',Password='password',Shell='Users.SupportedShell.ash.'}
|
||||
> sleep 5; passwd -d root
|
||||
> " > /tmp/cram
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null
|
||||
$ sleep 1
|
||||
|
||||
Check that the user and group exist:
|
||||
|
||||
$ R "grep testgroup /etc/group"
|
||||
testgroup:x:999:testuser
|
||||
|
||||
$ R "grep testuser /etc/passwd"
|
||||
testuser:x:666:999:testuser:/var:/bin/ash
|
||||
|
||||
$ R "grep -c testuser /etc/shadow"
|
||||
1
|
||||
|
||||
Check that we are able to login:
|
||||
|
||||
$ sshpass -ppassword ssh testuser@$TARGET_LAN_IP id
|
||||
uid=666(testuser) gid=999(testgroup) groups=999(testgroup),999(testgroup)
|
||||
|
||||
Delete group and user:
|
||||
|
||||
$ printf "\
|
||||
> ubus-cli Users.User.testuser.-
|
||||
> ubus-cli Users.Group.testgroup.-
|
||||
> " > /tmp/cram
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null
|
||||
$ sleep 1
|
||||
|
||||
Check that the user and group do not exist:
|
||||
|
||||
$ R "grep testgroup /etc/group"
|
||||
[1]
|
||||
|
||||
$ R "grep testuser /etc/passwd"
|
||||
[1]
|
||||
|
||||
$ R "grep testuser /etc/shadow"
|
||||
[1]
|
||||
|
||||
Check that we are not able to login:
|
||||
|
||||
$ sshpass -ppassword ssh testuser@$TARGET_LAN_IP id
|
||||
Permission denied, please try again.\r (esc)
|
||||
[5]
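The login and denial checks above exercise the account through the real SSH daemon, so the same probe is handy when investigating a failure by hand. A sketch using paramiko (an assumption for illustration; the cram test itself relies on `sshpass`):

```python
# Minimal sketch: verify that the freshly created account can (or cannot) log in over SSH.
# Uses paramiko as an illustration; address, user and password match the cram test above.
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

try:
    client.connect("192.168.1.1", username="testuser", password="password", timeout=10)
    _, stdout, _ = client.exec_command("id")
    print(stdout.read().decode().strip())
except paramiko.AuthenticationException:
    print("login rejected")
finally:
    client.close()
```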
|
||||
@@ -0,0 +1,69 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Add testing profiles and triggers:
|
||||
|
||||
$ printf "\
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.DetectionAtBoot=1
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.+{Alias='france-profile'}
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.france-profile.Name='france'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.france-profile.ImpactedModules='testing'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.france-profile.Trigger.+{Alias='country-france'}
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.france-profile.Trigger.country-france.LeftMember='TestingEmitter.CountryCode'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.france-profile.Trigger.country-france.RelationalOperator='Equal'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.france-profile.Trigger.country-france.RightMember='FR'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.+{Alias='usa-profile'}
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.usa-profile.Name='usa'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.usa-profile.ImpactedModules='testing'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.usa-profile.Trigger.+{Alias='country-usa'}
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.usa-profile.Trigger.country-usa.LeftMember='TestingEmitter.CountryCode'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.usa-profile.Trigger.country-usa.RelationalOperator='Equal'
|
||||
> ubus-cli X_PRPL-COM_MultiSettings.Profile.usa-profile.Trigger.country-usa.RightMember='US'
|
||||
> " > /tmp/cram
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null
|
||||
|
||||
Add testing service:
|
||||
|
||||
$ scp -r $TESTDIR/035-multisettings/* root@${TARGET_LAN_IP}:/
|
||||
|
||||
Restart multisettings service and start testing service:
|
||||
|
||||
$ R "/etc/init.d/testing start 2> /dev/null"
|
||||
$ R "/etc/init.d/multisettings restart 2> /dev/null"
|
||||
|
||||
Check that there is no profile currently selected:
|
||||
|
||||
$ R "cat /var/run/selected_profile"
|
||||
cat: can't open '/var/run/selected_profile': No such file or directory
|
||||
[1]
|
||||
|
||||
Check that TestingReceiver and TestingEmitter have correct default settings:
|
||||
|
||||
$ R "ubus call TestingReceiver _get \"{'rel_path':''}\" | jsonfilter -e @[*].NTPServer1 -e @[*].NTPServer2 | sort"
|
||||
0.eu.pool.ntp.org
|
||||
1.eu.pool.ntp.org
|
||||
|
||||
$ R "ubus call TestingEmitter _get \"{'rel_path':''}\" | jsonfilter -e @[*].CountryCode"
|
||||
EU
|
||||
|
||||
Change profile to France:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli TestingEmitter.CountryCode='FR'" > /dev/null; sleep 5
|
||||
|
||||
Check that the French profile is correctly applied:
|
||||
|
||||
$ R "ubus list | grep TestingReceiver"
|
||||
TestingReceiver
|
||||
|
||||
$ R "ubus call TestingReceiver _get \"{'rel_path':''}\" | jsonfilter -e @[*].NTPServer1 -e @[*].NTPServer2 | sort"
|
||||
0.fr.pool.ntp.org
|
||||
1.fr.pool.ntp.org
|
||||
|
||||
Cleanup:
|
||||
|
||||
$ R "/etc/init.d/testing stop 2> /dev/null"
|
||||
$ R "/etc/init.d/multisettings stop 2> /dev/null"
|
||||
$ R "rm -fr /etc/config/multisettings; rm /etc/init.d/testing"
|
||||
$ R "rm -fr /etc/amx/testing; rm -fr /var/run/selected_profile; rm /usr/bin/testing"
|
||||
$ R "/etc/init.d/multisettings start 2> /dev/null"
|
||||
@@ -0,0 +1,6 @@
|
||||
%populate {
|
||||
object TestingReceiver {
|
||||
parameter NTPServer1 = "0.eu.pool.ntp.org";
|
||||
parameter NTPServer2 = "1.eu.pool.ntp.org";
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,6 @@
|
||||
%populate {
|
||||
object TestingReceiver {
|
||||
parameter NTPServer1 = "0.fr.pool.ntp.org";
|
||||
parameter NTPServer2 = "1.fr.pool.ntp.org";
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,6 @@
|
||||
%populate {
|
||||
object TestingReceiver {
|
||||
parameter NTPServer1 = "0.us.pool.ntp.org";
|
||||
parameter NTPServer2 = "1.us.pool.ntp.org";
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
%config {}
|
||||
%define {
|
||||
object TestingEmitter {
|
||||
string CountryCode {
|
||||
on action validate call check_maximum 2;
|
||||
}
|
||||
}
|
||||
object TestingReceiver {
|
||||
string NTPServer1;
|
||||
string NTPServer2;
|
||||
}
|
||||
}
|
||||
%populate {
|
||||
object TestingEmitter {
|
||||
parameter CountryCode = "EU";
|
||||
}
|
||||
}
|
||||
#include "/var/run/selected_profile";
|
||||
?include "/etc/amx/testing/profile_${profile}.odl":"/etc/amx/testing/profile_default.odl";
|
||||
@@ -0,0 +1,22 @@
|
||||
#!/bin/sh
|
||||
|
||||
name="testing"
|
||||
|
||||
case $1 in
|
||||
start|boot)
|
||||
[ ! -f /usr/bin/${name} ] && ln -s /usr/bin/amxrt /usr/bin/${name}
|
||||
$name -D
|
||||
;;
|
||||
stop|shutdown)
|
||||
if [ -f /var/run/${name}.pid ]; then
|
||||
kill $(cat /var/run/${name}.pid)
|
||||
fi
|
||||
;;
|
||||
restart)
|
||||
$0 stop
|
||||
$0 start
|
||||
;;
|
||||
*)
|
||||
echo "Usage : $0 [start|boot|stop]"
|
||||
;;
|
||||
esac
|
||||
@@ -0,0 +1,73 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Check that random LXC binaries work:
|
||||
|
||||
$ R /opt/prplos/usr/bin/lxc-info
|
||||
lxc-info: No container name specified
|
||||
[1]
|
||||
|
||||
$ R /opt/prplos/usr/bin/lxc-device
|
||||
lxc-device: No container name specified
|
||||
[1]
|
||||
|
||||
Check Cthulhu.Config datamodel:
|
||||
|
||||
$ R "ubus -S call Cthulhu.Config _get"
|
||||
{"Cthulhu.Config.":{"ImageLocation":"/usr/share/rlyeh/images","StorageLocation":"/usr/share/cthulhu","UseOverlayFS":true,"DhcpCommand":"","DefaultBackend":"/usr/lib/cthulhu-lxc/cthulhu-lxc.so","BlobLocation":"/usr/share/rlyeh/blobs"}}
|
||||
|
||||
Check Rlyeh datamodel:
|
||||
|
||||
$ R "ubus -S call Rlyeh _get"
|
||||
{"Rlyeh.":{"ImageLocation":"/usr/share/rlyeh/images","SignatureVerification":false,"StorageLocation":"/usr/share/rlyeh/blobs"}}
|
||||
|
||||
Check SoftwareModules datamodel:
|
||||
|
||||
$ R "ubus -S call SoftwareModules _get"
|
||||
{"SoftwareModules.":{"ExecutionUnitNumberOfEntries":0,"ExecEnvNumberOfEntries":1,"DeploymentUnitNumberOfEntries":0}}
|
||||
|
||||
Check Timingila datamodel:
|
||||
|
||||
$ R "ubus -S call Timingila _get"
|
||||
{"Timingila.":{"RmAfterUninstall":true,"ContainerPluginPath":"/usr/lib/timingila-cthulhu/timingila-cthulhu.so","PackagerPluginPath":"/usr/lib/timingila-rlyeh/timingila-rlyeh.so","version":"alpha"}}
|
||||
|
||||
Check that Rlyeh has no container images:
|
||||
|
||||
$ R "ubus -S call Rlyeh.Images _get"
|
||||
{"Rlyeh.Images.":{}}
|
||||
|
||||
Check that Rlyeh can download testing container:
|
||||
|
||||
$ R "ubus -S call Rlyeh pull '{\"URI\":\"docker://registry.gitlab.com/prpl-foundation/prplos/prplos/prplos-testing-container-intel_mips-xrx500:v1\",\"UUID\":\"testing\"}'"
|
||||
{"retval":""}
|
||||
|
||||
$ R "ubus -t 60 wait_for Rlyeh.Images.1"
|
||||
|
||||
Check that Rlyeh has downloaded the testing container:
|
||||
|
||||
$ R "ubus -S call Rlyeh.Images _get | jsonfilter -e @[*].Name -e @[*].Status | sort"
|
||||
Downloaded
|
||||
prplos/prplos-testing-container-intel_mips-xrx500
|
||||
|
||||
Remove testing container:
|
||||
|
||||
$ R "ubus -S call Rlyeh remove '{\"UUID\":\"testing\",\"Version\":\"v1\"}'"; sleep 5
|
||||
{"retval":""}
|
||||
|
||||
$ R "ubus -S call Rlyeh.Images.1 _get | jsonfilter -e @[*].MarkForRemoval"
|
||||
true
|
||||
|
||||
$ R "ubus -S call Rlyeh gc"
|
||||
{"retval":""}
|
||||
|
||||
Check that Rlyeh has no container images:
|
||||
|
||||
$ R "ubus -S call Rlyeh.Images _get"
|
||||
{"Rlyeh.Images.":{}}
|
||||
|
||||
Check that testing image is gone from the filesystem as well:
|
||||
|
||||
$ R "ls -al /usr/share/rlyeh/images/prplos/prplos-testing-container-intel_mips-xrx500"
|
||||
ls: /usr/share/rlyeh/images/prplos/prplos-testing-container-intel_mips-xrx500: No such file or directory
|
||||
[1]
|
||||
@@ -0,0 +1,163 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Check QoS root datamodel:
|
||||
|
||||
$ R "ubus -S call QoS _get"
|
||||
{"QoS.":{"SupportedControllers":"mod-qos-tc","ShaperNumberOfEntries":1,"QueueNumberOfEntries":5,"MaxClassificationEntries":20,"ClassificationNumberOfEntries":0,"QueueStatsNumberOfEntries":4,"MaxQueueEntries":20,"MaxShaperEntries":20}}
|
||||
|
||||
Check Qos.Node.7 datamodel:
|
||||
|
||||
$ R "ubus call QoS.Node.7 _get | jsonfilter -e @[*].TrafficClasses -e @[*].DropAlgorithm -e @[*].Controller -e @[*].AllInterfaces -e @[*].SchedulerAlgorithm -e @[*].Alias " | sort
|
||||
5, 6, 7
|
||||
DT
|
||||
HTB
|
||||
false
|
||||
mod-qos-tc
|
||||
node-queue-home-voip
|
||||
|
||||
Check QoS.Queue datamodel for stats-home-iptv:
|
||||
|
||||
$ R "ubus call QoS.Queue.5 _get | jsonfilter -e @[*].Alias -e @[*].SchedulerAlgorithm -e @[*].Status -e @[*].Controller -e @[*].TrafficClasses" | sort
|
||||
3
|
||||
Enabled
|
||||
HTB
|
||||
mod-qos-tc
|
||||
queue-home-iptv
|
||||
|
||||
Check QoS.QueueStats datamodel for stats-home-iptv:
|
||||
|
||||
$ R "ubus -S call QoS.QueueStats.4 _get | jsonfilter -e @[*].Status -e @[*].QueueOccupancyPercentage -e @[*].Alias -e @[*].Queue" | sort
|
||||
0
|
||||
Enabled
|
||||
QoS.Queue.queue-home-iptv
|
||||
stats-home-iptv
|
||||
|
||||
Check QoS.Scheduler datamodel:
|
||||
|
||||
$ R "ubus call QoS.Scheduler _get | jsonfilter -e @[*].DefaultQueue -e @[*].SchedulerAlgorithm -e @[*].Status -e @[*].Controller" | sort
|
||||
Enabled
|
||||
HTB
|
||||
QoS.Queue.queue-home-data.
|
||||
mod-qos-tc
|
||||
|
||||
Check QoS.Shaper.1 datamodel:
|
||||
|
||||
$ R "ubus call QoS.Shaper.1 _get | jsonfilter -e @[*].Controller -e @[*].Enable -e @[*].Status" | sort
|
||||
Enabled
|
||||
mod-qos-tc
|
||||
true
|
||||
|
||||
Add a new classification instance and set the DSCP value for IPv4 ICMP packets:
|
||||
|
||||
$ cat > /tmp/new-classification <<EOF
|
||||
> ubus-cli QoS.Classification.+{Alias=icmp_dscp_cs6,Flags=\"class_basic\"}
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs6.DSCPMark=48
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs6.Interface=\"Postrouting\"
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs6.Protocol=1
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs6.IPVersion=4
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs6.Enable=1
|
||||
> EOF
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/new-classification)'" > /dev/null
|
||||
$ sleep 2
|
||||
|
||||
Check new classification instance configuration:
|
||||
|
||||
$ R "ubus call QoS.Classification.1 _get | jsonfilter -e @[*].Status -e @[*].DSCPMark -e @[*].Alias -e @[*].Protocol -e @[*].IPVersion" | sort
|
||||
1
|
||||
4
|
||||
48
|
||||
Enabled
|
||||
icmp_dscp_cs6
|
||||
|
||||
$ R "iptables -t mangle -L POSTROUTING_class | grep 'DSCP set'"
|
||||
DSCP icmp -- anywhere anywhere DSCP set 0x30
|
||||
|
||||
Alter the previous classification and set the DSCP marking to 52:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli QoS.Classification.icmp_dscp_cs6.DSCPMark=52'" > /dev/null; sleep 2
|
||||
|
||||
Check altered classification instance configuration:
|
||||
|
||||
$ R "ubus call QoS.Classification.1 _get | jsonfilter -e @[*].Status -e @[*].DSCPMark -e @[*].Alias -e @[*].Protocol -e @[*].IPVersion" | sort
|
||||
1
|
||||
4
|
||||
52
|
||||
Enabled
|
||||
icmp_dscp_cs6
|
||||
|
||||
$ R "iptables -t mangle -L POSTROUTING_class | grep 'DSCP set'"
|
||||
DSCP icmp -- anywhere anywhere DSCP set 0x34
|
||||
|
||||
Add a second classification instance. Mark ICMP packets to network 192.168.25.0/24 with value 8 (CS1):
|
||||
|
||||
$ cat > /tmp/new-classification <<EOF
|
||||
> ubus-cli QoS.Classification.+{Alias=icmp_dscp_cs1,Flags=\"class_basic\"}
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs1.DSCPMark=8
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs1.Interface="Postrouting"
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs1.Protocol=1
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs1.IPVersion=4
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs1.DestIP=192.168.25.0
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs1.DestMask="255.255.255.0"
|
||||
> ubus-cli QoS.Classification.icmp_dscp_cs1.Enable=1
|
||||
> EOF
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/new-classification)'" > /dev/null
|
||||
$ sleep 2
|
||||
|
||||
The firewall rule to set a DSCP value for ICMP packets to network 192.168.25.0/24 must be the first one, so change the order:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli QoS.Classification.icmp_dscp_cs1.Order=1'" > /dev/null; sleep 2
|
||||
|
||||
Check correct change of packet classification ordering:
|
||||
|
||||
$ R "ubus call QoS.Classification.1 _get | jsonfilter -e @[*].Order"
|
||||
2
|
||||
|
||||
$ R "ubus call QoS.Classification.2 _get | jsonfilter -e @[*].Order"
|
||||
1
|
||||
|
||||
$ R "iptables -t mangle -L POSTROUTING_class | grep 'DSCP set'"
|
||||
DSCP icmp -- anywhere 192.168.25.0/24 DSCP set 0x08
|
||||
DSCP icmp -- anywhere anywhere DSCP set 0x34
|
||||
|
||||
Check default QoS configuration:
|
||||
|
||||
$ R "tc qdisc show dev $DUT_WAN_INTERFACE"
|
||||
qdisc htb 1: root refcnt (2|5|9) r2q 10 default 0x10003 direct_packets_stat [0-9]+ direct_qlen (532|1000) (re)
|
||||
|
||||
$ R "tc class show dev $DUT_WAN_INTERFACE" | sort
|
||||
class htb 1:1 parent 1:32 prio 5 rate 25Mbit ceil 25Mbit burst *b cburst *b (glob)
|
||||
class htb 1:2 parent 1:32 rate 300Mbit ceil 300Mbit burst *b cburst *b (glob)
|
||||
class htb 1:3 parent 1:2 prio 3 rate 250Mbit ceil 250Mbit burst *b cburst *b (glob)
|
||||
class htb 1:32 root rate 325Mbit ceil 325Mbit burst *b cburst *b (glob)
|
||||
class htb 1:4 parent 1:2 prio 1 rate 10Mbit ceil 10Mbit burst *b cburst *b (glob)
|
||||
class htb 1:5 parent 1:2 prio 2 rate 40Mbit ceil 40Mbit burst *b cburst *b (glob)
|
||||
|
||||
$ R "tc filter show dev $DUT_WAN_INTERFACE" | sort
|
||||
filter parent 1: protocol all pref 1 fw.* (re)
|
||||
filter parent 1: protocol all pref 1 fw.*handle 0x4/0x1f classid 1:4 (re)
|
||||
filter parent 1: protocol all pref 2 fw.* (re)
|
||||
filter parent 1: protocol all pref 2 fw.*handle 0x5/0x1f classid 1:5 (re)
|
||||
filter parent 1: protocol all pref 5 fw.* (re)
|
||||
filter parent 1: protocol all pref 5 fw.*handle 0x1/0x1f classid 1:1 (re)
|
||||
|
||||
Let all upstream (LAN -> WAN) UDP packets to network 192.168.55.0/24 go through queue-home-iptv (highest priority):
|
||||
|
||||
$ cat > /tmp/new-classification <<EOF
|
||||
> ubus-cli QoS.Classification.+{Alias=subnet1_high_prio,Flags=\"class_basic\"}
|
||||
> ubus-cli QoS.Classification.subnet1_high_prio.Interface=\"Forward\"
|
||||
> ubus-cli QoS.Classification.subnet1_high_prio.Protocol=17
|
||||
> ubus-cli QoS.Classification.subnet1_high_prio.IPVersion=4
|
||||
> ubus-cli QoS.Classification.subnet1_high_prio.DestIP=192.168.55.0
|
||||
> ubus-cli QoS.Classification.subnet1_high_prio.DestMask="255.255.255.0"
|
||||
> ubus-cli QoS.Classification.subnet1_high_prio.TrafficClass=5
|
||||
> ubus-cli QoS.Classification.subnet1_high_prio.Enable=1
|
||||
> EOF
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/new-classification)'" > /dev/null
|
||||
$ sleep 2
|
||||
|
||||
Check that iptables rule is created in the FORWARD_class chain in the mangle table:
|
||||
|
||||
$ R "iptables -t mangle -L FORWARD_class | grep 'MARK xset'"
|
||||
MARK udp -- anywhere 192.168.55.0/24 udp MARK xset 0x4/0x1f
|
||||
@@ -0,0 +1,9 @@
|
||||
Check that REST API is working as expected:
|
||||
|
||||
$ curl --silent --max-time 3 --user admin:admin 'http://192.168.1.1:8080/serviceElements/DeviceInfo.FriendlyName'
|
||||
[{"parameters":{"FriendlyName":"prplHGW"},"path":"DeviceInfo."}] (no-eol)
|
||||
|
||||
Check that REST API is not usable with invalid credentials:
|
||||
|
||||
$ curl --silent --max-time 3 --user admin:failure 'http://192.168.1.1:8080/serviceElements/DeviceInfo.FriendlyName' | grep h1
|
||||
<h1>401 Unauthorized</h1>
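The REST endpoint checked above is plain HTTP with basic authentication, so it is easy to query from a script as well. A sketch using only the standard library; the address and credentials are the test defaults:

```python
# Minimal sketch: query the prplOS REST API endpoint the test exercises, with basic auth.
import base64
import urllib.request

url = "http://192.168.1.1:8080/serviceElements/DeviceInfo.FriendlyName"
token = base64.b64encode(b"admin:admin").decode()

req = urllib.request.Request(url, headers={"Authorization": "Basic " + token})
with urllib.request.urlopen(req, timeout=3) as resp:
    print(resp.read().decode())  # expected: the DeviceInfo.FriendlyName JSON shown above
```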
|
||||
9
.gitlab/tests/cram/generic/network.t
Normal file
9
.gitlab/tests/cram/generic/network.t
Normal file
@@ -0,0 +1,9 @@
|
||||
Check DUT is reachable over LAN:
|
||||
|
||||
$ ping -c3 -W1 192.168.1.2 | grep '3 packets' | cut -d, -f1-3
|
||||
3 packets transmitted, 3 received, 0% packet loss
|
||||
|
||||
Check DUT is reachable over WAN:
|
||||
|
||||
$ ping -c3 -W1 10.0.0.2 | grep '3 packets' | cut -d, -f1-3
|
||||
3 packets transmitted, 3 received, 0% packet loss
|
||||
11
.gitlab/tests/cram/generic/network_security.t
Normal file
11
.gitlab/tests/cram/generic/network_security.t
Normal file
@@ -0,0 +1,11 @@
|
||||
Check that NO port is open from WAN:
|
||||
|
||||
$ nmap --open 10.0.0.2 | grep open
|
||||
[1]
|
||||
|
||||
Check that only certain ports are open from LAN:
|
||||
|
||||
$ nmap --open 192.168.1.1 | grep open
|
||||
22/tcp open ssh
|
||||
53/tcp open domain
|
||||
8080/tcp open http-proxy
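A quick way to reproduce the LAN-side expectation without nmap is a plain TCP connect scan. A sketch that checks the three ports the test expects to be open (the port list comes from the expected output above; the extra probed ports are arbitrary examples):

```python
# Minimal sketch: confirm that only SSH, DNS and the web UI port answer on the LAN address,
# matching the nmap expectations in the test above.
import socket

HOST = "192.168.1.1"
EXPECTED_OPEN = {22, 53, 8080}


def is_open(port, timeout=1.0):
    try:
        with socket.create_connection((HOST, port), timeout=timeout):
            return True
    except OSError:
        return False


open_ports = {p for p in sorted(EXPECTED_OPEN | {23, 80, 443}) if is_open(p)}
print("unexpected open ports:", open_ports - EXPECTED_OPEN)
print("missing expected ports:", EXPECTED_OPEN - open_ports)
```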
|
||||
12
.gitlab/tests/cram/generic/ubus-cli.t
Normal file
12
.gitlab/tests/cram/generic/ubus-cli.t
Normal file
@@ -0,0 +1,12 @@
|
||||
Check that we have the correct DHCPv4 pools:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli DHCPv4.Server.Pool.*.Alias?'" | grep Alias= | sort
|
||||
\x1b[32;1mDHCPv4.Server.Pool.1.\x1b[0mAlias="lan"\r (esc)
|
||||
\x1b[32;1mDHCPv4.Server.Pool.1.Client.1.\x1b[0mAlias="client-??:??:??:??:??:??"\r (esc) (glob)
|
||||
\x1b[32;1mDHCPv4.Server.Pool.2.\x1b[0mAlias="guest"\r (esc)
|
||||
|
||||
Check that we have the correct DHCPv6 pools:
|
||||
|
||||
$ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli DHCPv6.Server.Pool.*.Alias?'" | grep Alias= | sort
|
||||
\x1b[32;1mDHCPv6.Server.Pool.1.\x1b[0mAlias="lan"\r (esc)
|
||||
\x1b[32;1mDHCPv6.Server.Pool.2.\x1b[0mAlias="guest"\r (esc)
|
||||
202
.gitlab/tests/cram/generic/ubus.t
Normal file
202
.gitlab/tests/cram/generic/ubus.t
Normal file
@@ -0,0 +1,202 @@
|
||||
Create R alias:
|
||||
|
||||
$ alias R="${CRAM_REMOTE_COMMAND:-}"
|
||||
|
||||
Check that ubus has all expected services available:
|
||||
|
||||
$ R "ubus list | grep -v '^[[:upper:]]'"
|
||||
dhcp
|
||||
dnsmasq
|
||||
luci-rpc
|
||||
network
|
||||
network.device
|
||||
network.interface
|
||||
network.interface.guest
|
||||
network.interface.lan
|
||||
network.interface.loopback
|
||||
network.interface.wan
|
||||
network.interface.wan6
|
||||
network.wireless
|
||||
service
|
||||
session
|
||||
system
|
||||
uci
|
||||
umdns
|
||||
|
||||
Check that ubus has expected datamodels available:
|
||||
|
||||
$ R "ubus list | grep '[[:upper:]]' | grep -v '\.[[:digit:]]'"
|
||||
ACLManager
|
||||
ACLManager.Role
|
||||
Bridging
|
||||
Bridging.Bridge
|
||||
Cthulhu
|
||||
Cthulhu.Config
|
||||
Cthulhu.Container
|
||||
Cthulhu.Container.Instances
|
||||
Cthulhu.Information
|
||||
Cthulhu.Sandbox
|
||||
Cthulhu.Sandbox.Instances
|
||||
DHCPv4
|
||||
DHCPv4.Client
|
||||
DHCPv4.Server
|
||||
DHCPv4.Server.Pool
|
||||
DHCPv6
|
||||
DHCPv6.Client
|
||||
DHCPv6.Server
|
||||
DHCPv6.Server.Pool
|
||||
Device
|
||||
Device.InterfaceStack
|
||||
DeviceInfo
|
||||
DeviceInfo.DeviceImageFile
|
||||
DeviceInfo.FirmwareImage
|
||||
DeviceInfo.Location
|
||||
DeviceInfo.MemoryStatus
|
||||
DeviceInfo.Processor
|
||||
DeviceInfo.VendorConfigFile
|
||||
DeviceInfo.VendorLogFile
|
||||
Ethernet
|
||||
Ethernet.Interface
|
||||
Ethernet.Link
|
||||
Ethernet.VLANTermination
|
||||
Firewall
|
||||
Firewall.Chain
|
||||
Firewall.Level
|
||||
Firewall.X_Prpl_DMZ
|
||||
Firewall.X_Prpl_Pinhole
|
||||
Firewall.X_Prpl_Policy
|
||||
Firewall.X_Prpl_PortTrigger
|
||||
Firewall.X_Prpl_Service
|
||||
IP
|
||||
IP.ActivePort
|
||||
IP.Interface
|
||||
ManagementServer
|
||||
ManagementServer.ConnRequest
|
||||
ManagementServer.InternalSettings
|
||||
ManagementServer.State
|
||||
ManagementServer.Stats
|
||||
ManagementServer.Subscription
|
||||
NAT
|
||||
NAT.InterfaceSetting
|
||||
NAT.PortMapping
|
||||
NetDev
|
||||
NetDev.ConversionTable
|
||||
NetDev.ConversionTable.Protocol
|
||||
NetDev.ConversionTable.Scope
|
||||
NetDev.ConversionTable.Table
|
||||
NetDev.Link
|
||||
NetModel
|
||||
NetModel.Intf
|
||||
QoS
|
||||
QoS.Classification
|
||||
QoS.Node
|
||||
QoS.Queue
|
||||
QoS.QueueStats
|
||||
QoS.Scheduler
|
||||
QoS.Shaper
|
||||
Rlyeh
|
||||
Rlyeh.Images
|
||||
Routing
|
||||
Routing.RIP
|
||||
Routing.RIP.InterfaceSetting
|
||||
Routing.RouteInformation
|
||||
Routing.RouteInformation.InterfaceSetting
|
||||
Routing.Router
|
||||
SoftwareModules
|
||||
SoftwareModules.DeploymentUnit
|
||||
SoftwareModules.ExecEnv
|
||||
SoftwareModules.ExecutionUnit
|
||||
Time
|
||||
Time.X_PRPL_TimeServer
|
||||
Time.X_PRPL_TimeServer.Intf
|
||||
Timingila
|
||||
Users
|
||||
Users.Group
|
||||
Users.Role
|
||||
Users.SupportedShell
|
||||
Users.User
|
||||
X_PRPL-COM_MultiSettings
|
||||
X_PRPL-COM_MultiSettings.Profile
|
||||
X_PRPL-COM_PersistentConfiguration
|
||||
X_PRPL-COM_PersistentConfiguration.Config
|
||||
X_PRPL-COM_PersistentConfiguration.Config.Security
|
||||
X_PRPL-COM_PersistentConfiguration.Service
|
||||
X_PRPL_WANManager
|
||||
X_PRPL_WANManager.WAN
|
||||
|
||||
Check that we have the correct bridge aliases:
|
||||
|
||||
$ R "ubus call Bridging _get \"{'rel_path':'Bridge.*.Alias'}\" | jsonfilter -e @[*].Alias | sort"
|
||||
guest
|
||||
lan
|
||||
|
||||
Check that we've correct DHCP pool settings:
|
||||
|
||||
$ R "ubus call DHCPv4.Server.Pool _get \"{'rel_path':'*'}\" | grep -E '(Alias|MinAddres|MaxAddress|Enable|Servers|Status)' | sort"
|
||||
\t\t"Alias": "guest", (esc)
|
||||
\t\t"Alias": "lan", (esc)
|
||||
\t\t"DNSServers": "192.168.1.1", (esc)
|
||||
\t\t"DNSServers": "192.168.2.1", (esc)
|
||||
\t\t"Enable": true, (esc)
|
||||
\t\t"Enable": true, (esc)
|
||||
\t\t"MaxAddress": "192.168.1.249", (esc)
|
||||
\t\t"MaxAddress": "192.168.2.249", (esc)
|
||||
\t\t"MinAddress": "192.168.1.100", (esc)
|
||||
\t\t"MinAddress": "192.168.2.100", (esc)
|
||||
\t\t"Status": "Enabled", (esc)
|
||||
\t\t"Status": "Enabled", (esc)
|
||||
|
||||
$ R "ubus call DHCPv6.Server.Pool _get \"{'rel_path':'*'}\" | grep -E '(Alias|Enable|Status)' | sort"
|
||||
\t\t"Alias": "guest", (esc)
|
||||
\t\t"Alias": "lan", (esc)
|
||||
\t\t"Enable": true, (esc)
|
||||
\t\t"Enable": true, (esc)
|
||||
\t\t"IANAEnable": false, (esc)
|
||||
\t\t"IANAEnable": false, (esc)
|
||||
\t\t"IAPDEnable": false, (esc)
|
||||
\t\t"IAPDEnable": false, (esc)
|
||||
\t\t"Status": "Enabled", (esc)
|
||||
\t\t"Status": "Enabled", (esc)
|
||||
|
||||
Check that we've correct Time.CurrentLocalTime:
|
||||
|
||||
$ time=$(R "ubus call Time _get '{\"rel_path\":\"CurrentLocalTime\"}' | jsonfilter -e @[*].CurrentLocalTime")
|
||||
$ time=$(echo $time | sed -E 's/([0-9\-]+)T([0-9]+:[0-9]+:[0-9]+).*/\1 \2/')
|
||||
$ time=$(date -d "$time" +'%s')
|
||||
$ sys=$(R date +"%s")
|
||||
$ diff=$(( (sys - time) ))
|
||||
$ tolerance=5
|
||||
$ R logger -t cram "Time.CurrentLocalTime=$(date -d @$time +'%c') SystemTime=$(date -d @$sys +'%c') diff=${diff}s tolerance=${tolerance}s"
|
||||
$ test "$diff" -le "$tolerance" && echo "Time is OK"
|
||||
Time is OK
|
||||
|
||||
Check that aclmanager has expected setup:
|
||||
|
||||
$ R "ubus call ACLManager.Role _get '{\"rel_path\":\"*\"}' | jsonfilter -e @[*].Name -e @[*].Alias | sort"
|
||||
admin
|
||||
cpe-Role-1
|
||||
cpe-Role-2
|
||||
cpe-Role-3
|
||||
guest
|
||||
operator
|
||||
|
||||
Check that Users.Role component has expected setup:
|
||||
|
||||
$ R "ubus call Users.Role _get '{\"rel_path\":\"\"}' | jsonfilter -e @[*].Alias -e @[*].RoleName | sort"
|
||||
acl
|
||||
acl-role
|
||||
admin
|
||||
admin-role
|
||||
guest
|
||||
guest-role
|
||||
|
||||
Check that we've correct hostname and release info:
|
||||
|
||||
$ R "ubus -S call system board | jsonfilter -e '@.hostname' -e '@.release.distribution'"
|
||||
prplOS
|
||||
OpenWrt
|
||||
|
||||
Check that netifd service is running:
|
||||
|
||||
$ R "ubus -S call service list | jsonfilter -e '@.network.instances.instance1.running'"
|
||||
true
|
||||
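
All of the suites in this commit follow the same access pattern: a TR-181-style data model object is read over ubus with its `_get` method and the fields of interest are picked out with jsonfilter before being compared against the recorded output. As a standalone illustration on the device shell (object path and field taken verbatim from the test above):

  ubus call DHCPv4.Server.Pool _get "{'rel_path':'*'}" | jsonfilter -e '@[*].Alias'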

26
.gitlab/tests/cram/glinet-b1300/010-network.t
Normal file
@@ -0,0 +1,26 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check correct routing table:

  $ R ip route
  default via 10.0.0.1 dev eth1 proto static src 10.0.0.2
  10.0.0.0/24 dev eth1 proto kernel scope link src 10.0.0.2
  192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.1
  192.168.2.0/24 dev br-guest proto kernel scope link src 192.168.2.1 linkdown

Check correct interface setup:

  $ R "ip link | grep ^\\\\d | cut -d: -f2-" | LC_ALL=C sort
  br-guest: <NO-CARRIER,BROADCAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default qlen 1000
  br-lan: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
  eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master br-lan state UP mode DEFAULT group default qlen 1000
  eth1: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc htb state UP mode DEFAULT group default qlen 1000
  ifb0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 32
  ifb1: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 32
  lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
  teql0: <NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 100
  veth_gene_0@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br-lan state UP mode DEFAULT group default qlen 1000
  wlan0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
  wlan1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000

186
.gitlab/tests/cram/glinet-b1300/020-ubus.t
Normal file
@@ -0,0 +1,186 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check that we've correct system info:

  $ R "ubus call system board | jsonfilter -e @.system -e @.model -e @.board_name"
  ARMv7 Processor rev 5 (v7l)
  GL.iNet GL-B1300
  glinet,gl-b1300

  $ R "ubus call DeviceInfo _get | jsonfilter -e '@[\"DeviceInfo.\"].ProductClass'"
  GL.iNet GL-B1300

Check that we've correct bridge port aliases:

  $ R "ubus call Bridging _get \"{'rel_path':'Bridge.*.Port.*.Alias'}\" | jsonfilter -e @[*].Alias | sort"
  ETH0
  GUEST
  bridge
  default_radio0
  default_radio1
  guest_radio0
  guest_radio1

Check that we've correct ethernet interface details:

  $ R "ubus call Ethernet _get \"{'rel_path':'Interface.'}\" | grep -E '(Alias|Enable|Name)' | sort"
  \t\t"Alias": "ETH0", (esc)
  \t\t"Alias": "ETH1", (esc)
  \t\t"EEEEnable": false, (esc)
  \t\t"EEEEnable": false, (esc)
  \t\t"Enable": true, (esc)
  \t\t"Enable": true, (esc)
  \t\t"Name": "eth0", (esc)
  \t\t"Name": "eth1", (esc)

Check that we've correct ethernet link details:

  $ R "ubus call Ethernet _get \"{'rel_path':'Link.'}\" | grep -E '(Alias|Enable|Name)' | sort"
  \t\t"Alias": "ETH1", (esc)
  \t\t"Alias": "GUEST", (esc)
  \t\t"Alias": "LAN", (esc)
  \t\t"Alias": "LO", (esc)
  \t\t"Enable": true, (esc)
  \t\t"Enable": true, (esc)
  \t\t"Enable": true, (esc)
  \t\t"Enable": true, (esc)
  \t\t"Name": "br-guest", (esc)
  \t\t"Name": "br-lan", (esc)
  \t\t"Name": "eth1", (esc)
  \t\t"Name": "lo", (esc)

Check that IP.Interface provides expected output:

  $ R "ubus call IP _get '{\"rel_path\":\"Interface.\",\"depth\":100}' | jsonfilter -e @[*].Alias -e @[*].Name -e @[*].IPAddress -e @[*].SubnetMask | sort" | egrep -v '(^f[0-9a-z:]+|^$)'
  10.0.0.2
  127.0.0.1
  192.168.1.1
  192.168.2.1
  255.0.0.0
  255.255.255.0
  255.255.255.0
  255.255.255.0
  ::1
  GUA
  GUA
  GUA_IAPD
  GUA_IAPD
  GUA_RA
  GUA_RA
  ULA
  ULA64
  br-guest
  br-lan
  eth1
  guest
  guest
  lan
  lan
  lo
  loopback
  loopback_ipv4
  loopbackipv6
  wan
  wan

Check that NAT.Interface provides expected output:

  $ R "ubus call NAT _get '{\"rel_path\":\"InterfaceSetting.\",\"depth\":100}' | jsonfilter -e @[*].Alias -e @[*].Interface"
  lan
  guest
  wan
  br-lan
  br-guest
  eth1

Check that NetDev.Link provides expected output:

  $ R "ubus call NetDev _get '{\"rel_path\":\"Link.\",\"depth\":100}' | jsonfilter -e @[*].Name | sort"
  br-guest
  br-lan
  eth0
  eth1
  ifb0
  ifb1
  lo
  teql0
  veth_gene_0
  wlan0
  wlan1

Check that NetModel.Intf provides expected output:

  $ R "ubus call NetModel _get '{\"rel_path\":\"Intf.\",\"depth\":100}' | jsonfilter -e @[*].Alias -e @[*].Flags -e @[*].Name -e @[*].Status | sed '/^$/d' | sort"
  bridge
  bridge
  bridge-GUEST
  bridge-GUEST
  bridge-bridge
  bridge-bridge
  bridgeport-ETH0
  bridgeport-ETH0
  bridgeport-default_radio0
  bridgeport-default_radio0
  bridgeport-default_radio1
  bridgeport-default_radio1
  bridgeport-guest_radio0
  bridgeport-guest_radio0
  bridgeport-guest_radio1
  bridgeport-guest_radio1
  ethIntf-ETH0
  ethIntf-ETH0
  ethIntf-ETH1
  ethIntf-ETH1
  ethLink-ETH1
  ethLink-ETH1
  ethLink-GUEST
  ethLink-GUEST
  ethLink-LAN
  ethLink-LAN
  ethLink-LO
  ethLink-LO
  eth_intf netdev
  eth_intf netdev
  eth_link
  eth_link
  eth_link
  eth_link
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  false
  inbridge
  inbridge
  inbridge
  inbridge
  inbridge
  ip netdev
  ip netdev
  ip netdev
  ip netdev dhcpv[46] dhcpv[46] (re)
  ip-guest
  ip-guest
  ip-lan
  ip-lan
  ip-loopback
  ip-loopback
  ip-wan
  ip-wan
  resolver
  resolver

4
.gitlab/tests/cram/glinet-b1300/021-ubus-cli.t
Normal file
@@ -0,0 +1,4 @@
Check that we've correct system info:

  $ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli DeviceInfo.?'" | grep ProductClass
  \x1b[32;1mDeviceInfo.\x1b[0mProductClass="GL.iNet GL-B1300"\r (esc)

9
.gitlab/tests/cram/glinet-b1300/030-wifi.t
Normal file
@@ -0,0 +1,9 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check for correct SSID setup:

  $ R "iwinfo | grep ESSID"
  wlan0 ESSID: unknown
  wlan1 ESSID: unknown

73
.gitlab/tests/cram/glinet-b1300/100-prplmesh.t
Normal file
@@ -0,0 +1,73 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Start wireless:

  $ R logger -t cram "Start wireless"
  $ R "uci set wireless.radio0.disabled='0'; uci set wireless.radio1.disabled='0'; uci commit; wifi up"
  $ sleep 30

Check that hostapd is operating after reboot:

  $ R logger -t cram "Check that hostapd is operating after reboot"
  $ R "ps w" | sed -nE 's/.*(\/usr\/sbin\/hostapd.*)/\1/p' | LC_ALL=C sort
  /usr/sbin/hostapd -s -P /var/run/wifi-phy0.pid -B /var/run/hostapd-phy0.conf
  /usr/sbin/hostapd -s -P /var/run/wifi-phy1.pid -B /var/run/hostapd-phy1.conf

Restart prplmesh:

  $ R logger -t cram "Restart prplmesh"
  $ R "/etc/init.d/prplmesh gateway_mode && sleep 5" > /dev/null 2>&1
  $ sleep 60

Check VAP setup after restart:

  $ R logger -t cram "Check VAP setup after restart"
  $ R "iwinfo | grep ESSID"
  wlan0 ESSID: "prplOS"
  wlan0-1 ESSID: "prplOS-guest"
  wlan1 ESSID: "prplOS"
  wlan1-1 ESSID: "prplOS-guest"

Check that prplmesh processes are running:

  $ R logger -t cram "Check that prplmesh processes are running"
  $ R "ps w" | sed -nE 's/.*(\/opt\/prplmesh\/bin.*)/\1/p' | LC_ALL=C sort
  /opt/prplmesh/bin/beerocks_agent
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_fronthaul -i wlan0
  /opt/prplmesh/bin/beerocks_fronthaul -i wlan1
  /opt/prplmesh/bin/ieee1905_transport

Check that prplmesh is operational:

  $ R logger -t cram "Check that prplmesh is operational"
  $ R "/opt/prplmesh/scripts/prplmesh_utils.sh status" | sed -E 's/.*(\/opt\/prplmesh.*)/\1/' | LC_ALL=C sort
  \x1b[0m (esc)
  \x1b[0m\x1b[1;32mOK Main radio agent operational (esc)
  \x1b[1;32moperational test success! (esc)
  /opt/prplmesh/bin/beerocks_agent
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_fronthaul
  /opt/prplmesh/bin/beerocks_fronthaul
  /opt/prplmesh/bin/ieee1905_transport
  /opt/prplmesh/scripts/prplmesh_utils.sh: status
  OK wlan0 radio agent operational
  OK wlan1 radio agent operational
  executing operational test using bml

Check that prplmesh is in operational state:

  $ R logger -t cram "Check that prplmesh is in operational state"
  $ R "/opt/prplmesh/bin/beerocks_cli -c bml_conn_map" | egrep '(wlan|OK)' | sed -E "s/.*: (wlan[0-9.]+) .*/\1/" | LC_ALL=C sort
  bml_connect: return value is: BML_RET_OK, Success status
  bml_disconnect: return value is: BML_RET_OK, Success status
  bml_nw_map_query: return value is: BML_RET_OK, Success status
  wlan0
  wlan0.0
  wlan0.1
  wlan1
  wlan1.0
  wlan1.1

@@ -0,0 +1,35 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Add DMZ host:

  $ printf "\
  > ubus-cli Firewall.X_Prpl_DMZ+{Alias='test'}
  > ubus-cli Firewall.X_Prpl_DMZ.test.SourceInterface=eth1
  > ubus-cli Firewall.X_Prpl_DMZ.test.DestinationIPAddress=192.168.1.186
  > ubus-cli Firewall.X_Prpl_DMZ.test.Enable=1
  > " > /tmp/cram
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null; sleep 1

Check that correct firewall rules were created:

  $ R "iptables -t nat -L POSTROUTING_DMZ | grep 186 | sort"
  SNAT all -- prplOS.lan 192.168.1.186 to:10.0.0.2

  $ R "iptables -t nat -L PREROUTING_DMZ | grep 186 | sort"
  DNAT all -- anywhere 10.0.0.2 to:192.168.1.186

  $ R "iptables -L FORWARD_DMZ | grep 186 | sort"
  ACCEPT all -- 192.168.1.186 anywhere
  ACCEPT all -- anywhere 192.168.1.186

Remove DMZ host:

  $ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_DMZ.test-" > /dev/null; sleep 1

Check that firewall rules are gone:

  $ R "iptables -t nat -L POSTROUTING_DMZ | grep 186 | sort"
  $ R "iptables -t nat -L PREROUTING_DMZ | grep 186 | sort"
  $ R "iptables -L FORWARD_DMZ | grep 186 | sort"

@@ -0,0 +1,41 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Get initial state of bridges:

  $ R "brctl show | sort | cut -d$'\t' -f1,4-"
  br-guest\tno (esc)
  br-lan\tno\t\teth0 (esc)
  bridge name\tSTP enabled\tinterfaces (esc)

Add eth1 to the Guest bridge:

  $ printf ' \
  > ubus-cli Bridging.Bridge.guest.Port.+{Name="eth1", Alias="ETH1"}\n
  > ubus-cli Bridging.Bridge.guest.Port.ETH1.Enable=1\n
  > ' > /tmp/run
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run)'" > /dev/null
  $ sleep 5

Check that eth1 is added to br-guest bridge:

  $ R "brctl show | sort | cut -d$'\t' -f1,4-"
  br-guest\tno\t\teth1 (esc)
  br-lan\tno\t\teth0 (esc)
  bridge name\tSTP enabled\tinterfaces (esc)

Remove eth1 from the Guest bridge:

  $ printf '\
  > ubus-cli Bridging.Bridge.guest.Port.ETH1-\n
  > ' > /tmp/run
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run)'" > /dev/null
  $ sleep 5

Check for initial state of bridges again:

  $ R "brctl show | sort | cut -d$'\t' -f1,4-"
  br-guest\tno (esc)
  br-lan\tno\t\teth0 (esc)
  bridge name\tSTP enabled\tinterfaces (esc)

@@ -0,0 +1,96 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check that Sandbox is not configured properly:

  $ R "ubus -S call Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1 _get"
  [4]

  $ R "ubus -S call Cthulhu.Config _get | jsonfilter -e @[*].DhcpCommand"

Configure Sandbox:

  $ cat > /tmp/run-sandbox <<EOF
  > ubus-cli Cthulhu.Config.DhcpCommand=\"udhcpc -r 192.168.1.200 -i\"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Type="Veth"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.+
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.Bridge="br-lan"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.Interface="eth0"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.EnableDhcp=1
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Enable=1
  > ubus-cli "Cthulhu.Sandbox.start(SandboxId=\"generic\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-sandbox)'" > /dev/null
  $ sleep 10

Check that Sandbox was configured properly:

  $ R "ubus -S call Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1 _get"
  {"Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.":{"EnableDhcp":true,"Interface":"eth0","Bridge":"br-lan"}}

  $ R "ubus -S call Cthulhu.Config _get | jsonfilter -e @[*].DhcpCommand"
  udhcpc -r 192.168.1.200 -i

Install testing prplOS container v1:

  $ cat > /tmp/run-container <<EOF
  > ubus-cli "SoftwareModules.InstallDU(URL=\"docker://registry.gitlab.com/prpl-foundation/prplos/prplos/prplos-testing-container-ipq40xx-generic:v1\", UUID=\"prplos-testing\", ExecutionEnvRef=\"generic\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-container)'" > /dev/null

Check that prplOS container v1 is running:

  $ sleep 30

  $ R "ubus -S call Cthulhu.Container.Instances.1 _get | jsonfilter -e @[*].Status -e @[*].Bundle -e @[*].BundleVersion -e @[*].ContainerId -e @[*].Alias | sort"
  Running
  cpe-prplos-testing
  prplos-testing
  prplos/prplos-testing-container-ipq40xx-generic
  v1

  $ R "ssh -y root@192.168.1.200 'cat /etc/container-version' 2> /dev/null"
  1

Update to prplOS container v2:

  $ cat > /tmp/run-container <<EOF
  > ubus-cli "SoftwareModules.DeploymentUnit.cpe-prplos-testing.Update(URL=\"docker://registry.gitlab.com/prpl-foundation/prplos/prplos/prplos-testing-container-ipq40xx-generic:v2\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-container)'" > /dev/null

Check that prplOS container v2 is running:

  $ sleep 30

  $ R "ubus -S call Cthulhu.Container.Instances.2 _get | jsonfilter -e @[*].Status -e @[*].Bundle -e @[*].BundleVersion -e @[*].ContainerId -e @[*].Alias | sort"
  Running
  cpe-prplos-testing
  prplos-testing
  prplos/prplos-testing-container-ipq40xx-generic
  v2

  $ R "ssh -y root@192.168.1.200 'cat /etc/container-version' 2> /dev/null"
  2

Uninstall prplOS testing container:

  $ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli SoftwareModules.DeploymentUnit.cpe-prplos-testing.Uninstall\(\)'" > /dev/null; sleep 5

Check that prplOS container is not running:

  $ R "ubus -S call Cthulhu.Container.Instances.2 _get"
  [4]

Check that Rlyeh has no container images:

  $ R "ubus -S call Rlyeh.Images _get"
  {"Rlyeh.Images.":{}}

Check that container image is gone from the filesystem as well:

  $ R "ls -al /usr/share/rlyeh/images/prplos"
  ls: /usr/share/rlyeh/images/prplos: No such file or directory
  [1]

30
.gitlab/tests/cram/nec-wx3000hp/010-network.t
Normal file
@@ -0,0 +1,30 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check correct routing table:

  $ R ip route
  default via 10.0.0.1 dev eth1 proto static src 10.0.0.2
  10.0.0.0/24 dev eth1 proto kernel scope link src 10.0.0.2
  192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.1
  192.168.2.0/24 dev br-guest proto kernel scope link src 192.168.2.1 linkdown

Check correct interface setup:

  $ R "ip link" | awk '/^[0-9]+:/ { printf $0; next } { print ";"$2 }' | awk '/lo:/{print} !/00:00:00:00:00:00/{print}' | cut -d\; -f1 | cut -d: -f2- | LC_ALL=C sort
  br-guest: <NO-CARRIER,BROADCAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default qlen 1000
  br-lan: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
  eth0_0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
  eth0_1: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast master br-lan state DOWN mode DEFAULT group default qlen 1000
  eth0_2: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast master br-lan state DOWN mode DEFAULT group default qlen 1000
  eth0_3: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast master br-lan state DOWN mode DEFAULT group default qlen 1000
  eth0_4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master br-lan state UP mode DEFAULT group default qlen 1000
  eth1: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc htb state UNKNOWN mode DEFAULT group default qlen 1000
  ifb0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 32
  ifb1: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 32
  lite0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN mode DEFAULT group default qlen 1000
  lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1
  loopdev0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
  teql0: <NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 100
  veth_gene_0@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br-lan state UP mode DEFAULT group default qlen 1000

10
.gitlab/tests/cram/nec-wx3000hp/020-ubus.t
Normal file
@@ -0,0 +1,10 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check that we've correct system info:

  $ R "ubus call system board | jsonfilter -e @.system -e @.model -e @.board_name"
  GRX500 rev 1.2
  EASY350 ANYWAN (GRX350) Axepoint Asurada model
  EASY350 ANYWAN (GRX350) Axepoint Asurada model

8
.gitlab/tests/cram/nec-wx3000hp/030-wifi.t
Normal file
@@ -0,0 +1,8 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check for correct SSID setup:

  $ R "iwinfo" | awk '/wlan[0-9.]+/{printf("%s %s ",$1,$3)};/Point:/{print $3}' | grep -v 00:00:00:00:00:00
  [1]

76
.gitlab/tests/cram/nec-wx3000hp/100-prplmesh.t
Normal file
@@ -0,0 +1,76 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Start wireless:

  $ R logger -t cram "Start wireless"
  $ R "uci set wireless.radio0.disabled='0'; uci set wireless.radio2.disabled='0'; uci commit; wifi up"
  $ sleep 120

Check that hostapd & supplicant proccess are up after wireless startup:

  $ R logger -t cram "Check that hostapd \& supplicant proccess are up after wireless startup"

  $ R "ps w" | sed -nE 's/.*(\/usr\/sbin\/hostapd.*)/\1/p' | LC_ALL=C sort
  /usr/sbin/hostapd -s -g /var/run/hostapd/global-hostapd -P /var/run/wifi-global-hostapd.pid -B /var/run/h

Restart prplmesh:

  $ R logger -t cram "Restart prplmesh"
  $ R "/etc/init.d/prplmesh gateway_mode > /dev/null 2>&1 && sleep 120"

Check VAP setup:

  $ R logger -t cram "Check VAP setup"

  $ R "iwinfo | grep ESSID"
  wlan0 ESSID: "dummy_ssid_0"
  wlan0.0 ESSID: "prplOS"
  wlan0.1 ESSID: "prplOS-guest"
  wlan2 ESSID: "dummy_ssid_2"
  wlan2.0 ESSID: "prplOS"
  wlan2.1 ESSID: "prplOS-guest"

Check that prplmesh processes are running:

  $ R logger -t cram "Check that prplmesh processes are running"

  $ R "ps w" | sed -nE 's/.*(\/opt\/prplmesh\/bin.*)/\1/p' | LC_ALL=C sort
  /opt/prplmesh/bin/beerocks_agent
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_fronthaul -i wlan0
  /opt/prplmesh/bin/beerocks_fronthaul -i wlan2
  /opt/prplmesh/bin/ieee1905_transport

Check that prplmesh is operational:

  $ R logger -t cram "Check that prplmesh is operational"
  $ R "/opt/prplmesh/scripts/prplmesh_utils.sh status" | sed -E 's/.*(\/opt\/prplmesh.*)/\1/' | LC_ALL=C sort
  \x1b[0m (esc)
  \x1b[0m\x1b[1;32mOK Main radio agent operational (esc)
  \x1b[1;32moperational test success! (esc)
  /opt/prplmesh/bin/beerocks_agent
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_fronthaul
  /opt/prplmesh/bin/beerocks_fronthaul
  /opt/prplmesh/bin/ieee1905_transport
  /opt/prplmesh/scripts/prplmesh_utils.sh: status
  OK wlan0 radio agent operational
  OK wlan2 radio agent operational
  executing operational test using bml

Check that prplmesh is in operational state:

  $ R logger -t cram "Check that prplmesh is in operational state"
  $ R "/opt/prplmesh/bin/beerocks_cli -c bml_conn_map" | egrep '(wlan|OK)' | sed -E "s/.*: (wlan[0-9.]+) .*/\1/" | LC_ALL=C sort
  bml_connect: return value is: BML_RET_OK, Success status
  bml_disconnect: return value is: BML_RET_OK, Success status
  bml_nw_map_query: return value is: BML_RET_OK, Success status
  wlan0
  wlan0.0
  wlan0.1
  wlan2
  wlan2.0
  wlan2.1

@@ -0,0 +1,35 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Add DMZ host:

  $ printf "\
  > ubus-cli Firewall.X_Prpl_DMZ+{Alias='test'}
  > ubus-cli Firewall.X_Prpl_DMZ.test.SourceInterface=eth1
  > ubus-cli Firewall.X_Prpl_DMZ.test.DestinationIPAddress=192.168.1.186
  > ubus-cli Firewall.X_Prpl_DMZ.test.Enable=1
  > " > /tmp/cram
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null; sleep 1

Check that correct firewall rules were created:

  $ R "iptables -t nat -L POSTROUTING_DMZ | grep 186 | sort"
  SNAT all -- prplOS.lan 192.168.1.186 to:10.0.0.2

  $ R "iptables -t nat -L PREROUTING_DMZ | grep 186 | sort"
  DNAT all -- anywhere 10.0.0.2 to:192.168.1.186

  $ R "iptables -L FORWARD_DMZ | grep 186 | sort"
  ACCEPT all -- 192.168.1.186 anywhere
  ACCEPT all -- anywhere 192.168.1.186

Remove DMZ host:

  $ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_DMZ.test-" > /dev/null; sleep 1

Check that firewall rules are gone:

  $ R "iptables -t nat -L POSTROUTING_DMZ | grep 186 | sort"
  $ R "iptables -t nat -L PREROUTING_DMZ | grep 186 | sort"
  $ R "iptables -L FORWARD_DMZ | grep 186 | sort"

@@ -0,0 +1,52 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Get initial state of bridges:

  $ R "brctl show | cut -d$'\t' -f1,4-"
  bridge name\tSTP enabled\tinterfaces (esc)
  br-guest\tno\t\t (esc)
  br-lan\tno\t\teth0_1 (esc)
  \t\t\t\t\teth0_2 (esc)
  \t\t\t\t\teth0_3 (esc)
  \t\t\t\t\teth0_4 (esc)

Remove eth0_1 from LAN bridge and add it to the Guest bridge:

  $ printf ' \
  > ubus-cli Bridging.Bridge.lan.Port.ETH0_1-\n
  > ubus-cli Bridging.Bridge.guest.Port.+{Name="eth0_1", Alias="ETH0_1"}\n
  > ubus-cli Bridging.Bridge.guest.Port.ETH0_1.Enable=1\n
  > ' > /tmp/run
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run)'" > /dev/null
  $ sleep 10

Check that eth0_1 is added to Guest bridge:

  $ R "brctl show | cut -d$'\t' -f1,4-"
  bridge name\tSTP enabled\tinterfaces (esc)
  br-guest\tno\t\teth0_1 (esc)
  br-lan\tno\t\teth0_2 (esc)
  \t\t\t\t\teth0_3 (esc)
  \t\t\t\t\teth0_4 (esc)

Remove eth0_1 from the Guest bridge and add it back to the LAN bridge:

  $ printf '\
  > ubus-cli Bridging.Bridge.guest.Port.ETH0_1-\n
  > ubus-cli Bridging.Bridge.lan.Port.+{Name="eth0_1", Alias="ETH0_1"}\n
  > ubus-cli Bridging.Bridge.lan.Port.ETH0_1.Enable=1\n
  > ' > /tmp/run
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run)'" > /dev/null
  $ sleep 10

Check for initial state of bridges again:

  $ R "brctl show | cut -d$'\t' -f1,4-"
  bridge name\tSTP enabled\tinterfaces (esc)
  br-guest\tno\t\t (esc)
  br-lan\tno\t\teth0_1 (esc)
  \t\t\t\t\teth0_2 (esc)
  \t\t\t\t\teth0_3 (esc)
  \t\t\t\t\teth0_4 (esc)

@@ -0,0 +1,96 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check that Sandbox is not configured properly:

  $ R "ubus -S call Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1 _get"
  [4]

  $ R "ubus -S call Cthulhu.Config _get | jsonfilter -e @[*].DhcpCommand"

Configure Sandbox:

  $ cat > /tmp/run-sandbox <<EOF
  > ubus-cli Cthulhu.Config.DhcpCommand=\"udhcpc -r 192.168.1.200 -i\"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Type="Veth"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.+
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.Bridge="br-lan"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.Interface="eth0"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.EnableDhcp=1
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Enable=1
  > ubus-cli "Cthulhu.Sandbox.start(SandboxId=\"generic\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-sandbox)'" > /dev/null
  $ sleep 10

Check that Sandbox was configured properly:

  $ R "ubus -S call Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1 _get"
  {"Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.":{"EnableDhcp":true,"Interface":"eth0","Bridge":"br-lan"}}

  $ R "ubus -S call Cthulhu.Config _get | jsonfilter -e @[*].DhcpCommand"
  udhcpc -r 192.168.1.200 -i

Install testing prplOS container v1:

  $ cat > /tmp/run-container <<EOF
  > ubus-cli "SoftwareModules.InstallDU(URL=\"docker://registry.gitlab.com/prpl-foundation/prplos/prplos/prplos-testing-container-intel_mips-xrx500:v1\", UUID=\"prplos-testing\", ExecutionEnvRef=\"generic\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-container)'" > /dev/null

Check that prplOS container v1 is running:

  $ sleep 50

  $ R "ubus -S call Cthulhu.Container.Instances.1 _get | jsonfilter -e @[*].Status -e @[*].Bundle -e @[*].BundleVersion -e @[*].ContainerId -e @[*].Alias | sort"
  Running
  cpe-prplos-testing
  prplos-testing
  prplos/prplos-testing-container-intel_mips-xrx500
  v1

  $ R "ssh -y root@192.168.1.200 'cat /etc/container-version' 2> /dev/null"
  1

Update to prplOS container v2:

  $ cat > /tmp/run-container <<EOF
  > ubus-cli "SoftwareModules.DeploymentUnit.cpe-prplos-testing.Update(URL=\"docker://registry.gitlab.com/prpl-foundation/prplos/prplos/prplos-testing-container-intel_mips-xrx500:v2\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-container)'" > /dev/null

Check that prplOS container v2 is running:

  $ sleep 40

  $ R "ubus -S call Cthulhu.Container.Instances.2 _get | jsonfilter -e @[*].Status -e @[*].Bundle -e @[*].BundleVersion -e @[*].ContainerId -e @[*].Alias | sort"
  Running
  cpe-prplos-testing
  prplos-testing
  prplos/prplos-testing-container-intel_mips-xrx500
  v2

  $ R "ssh -y root@192.168.1.200 'cat /etc/container-version' 2> /dev/null"
  2

Uninstall prplOS testing container:

  $ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli SoftwareModules.DeploymentUnit.cpe-prplos-testing.Uninstall\(\)'" > /dev/null; sleep 5

Check that prplOS container is not running:

  $ R "ubus -S call Cthulhu.Container.Instances.2 _get"
  [4]

Check that Rlyeh has no container images:

  $ R "ubus -S call Rlyeh.Images _get"
  {"Rlyeh.Images.":{}}

Check that container image is gone from the filesystem as well:

  $ R "ls -al /usr/share/rlyeh/images/prplos"
  ls: /usr/share/rlyeh/images/prplos: No such file or directory
  [1]

32
.gitlab/tests/cram/turris-omnia/010-network.t
Normal file
@@ -0,0 +1,32 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check correct routing table:

  $ R ip route
  default via 10.0.0.1 dev eth2 proto static src 10.0.0.2
  10.0.0.0/24 dev eth2 proto kernel scope link src 10.0.0.2
  192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.1
  192.168.2.0/24 dev br-guest proto kernel scope link src 192.168.2.1 linkdown

Check correct interface setup:

  $ R "ip link | grep ^\\\\d | cut -d: -f2-" | LC_ALL=C sort
  br-guest: <NO-CARRIER,BROADCAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default qlen 1000
  br-lan: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
  eth0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 532
  eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 532
  eth2: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc htb state UP mode DEFAULT group default qlen 532
  ifb0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 32
  ifb1: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 32
  lan0@eth1: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue master br-lan state LOWERLAYERDOWN mode DEFAULT group default qlen 1000
  lan1@eth1: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue master br-lan state LOWERLAYERDOWN mode DEFAULT group default qlen 1000
  lan2@eth1: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue master br-lan state LOWERLAYERDOWN mode DEFAULT group default qlen 1000
  lan3@eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br-lan state UP mode DEFAULT group default qlen 1000
  lan4@eth1: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue master br-lan state LOWERLAYERDOWN mode DEFAULT group default qlen 1000
  lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
  teql0: <NOARP> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 100
  veth_gene_0@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br-lan state UP mode DEFAULT group default qlen 1000
  wlan0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
  wlan1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000

10
.gitlab/tests/cram/turris-omnia/020-ubus.t
Normal file
@@ -0,0 +1,10 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check that we've correct system info:

  $ R "ubus call system board | jsonfilter -e @.system -e @.model -e @.board_name"
  ARMv7 Processor rev 1 (v7l)
  Turris Omnia
  cznic,turris-omnia

9
.gitlab/tests/cram/turris-omnia/030-wifi.t
Normal file
@@ -0,0 +1,9 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check for correct SSID setup:

  $ R "iwinfo | grep ESSID"
  wlan0 ESSID: unknown
  wlan1 ESSID: unknown

73
.gitlab/tests/cram/turris-omnia/100-prplmesh.t
Normal file
@@ -0,0 +1,73 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Start wireless:

  $ R logger -t cram "Start wireless"
  $ R "uci set wireless.radio0.disabled='0'; uci set wireless.radio1.disabled='0'; uci commit; wifi up"
  $ sleep 30

Check that hostapd is operating after reboot:

  $ R logger -t cram "Check that hostapd is operating after reboot"
  $ R "ps w" | sed -nE 's/.*(\/usr\/sbin\/hostapd.*)/\1/p' | LC_ALL=C sort
  /usr/sbin/hostapd -s -P /var/run/wifi-phy0.pid -B /var/run/hostapd-phy0.conf
  /usr/sbin/hostapd -s -P /var/run/wifi-phy1.pid -B /var/run/hostapd-phy1.conf

Restart prplmesh:

  $ R logger -t cram "Restart prplmesh"
  $ R "/etc/init.d/prplmesh gateway_mode && sleep 5" > /dev/null 2>&1
  $ sleep 60

Check VAP setup after restart:

  $ R logger -t cram "Check VAP setup after restart"
  $ R "iwinfo | grep ESSID"
  wlan0 ESSID: "prplOS"
  wlan0-1 ESSID: "prplOS-guest"
  wlan1 ESSID: "prplOS"
  wlan1-1 ESSID: "prplOS-guest"

Check that prplmesh processes are running:

  $ R logger -t cram "Check that prplmesh processes are running"
  $ R "ps w" | sed -nE 's/.*(\/opt\/prplmesh\/bin.*)/\1/p' | LC_ALL=C sort
  /opt/prplmesh/bin/beerocks_agent
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_fronthaul -i wlan0
  /opt/prplmesh/bin/beerocks_fronthaul -i wlan1
  /opt/prplmesh/bin/ieee1905_transport

Check that prplmesh is operational:

  $ R logger -t cram "Check that prplmesh is operational"
  $ R "/opt/prplmesh/scripts/prplmesh_utils.sh status" | sed -E 's/.*(\/opt\/prplmesh.*)/\1/' | LC_ALL=C sort
  \x1b[0m (esc)
  \x1b[0m\x1b[1;32mOK Main radio agent operational (esc)
  \x1b[1;32moperational test success! (esc)
  /opt/prplmesh/bin/beerocks_agent
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_controller
  /opt/prplmesh/bin/beerocks_fronthaul
  /opt/prplmesh/bin/beerocks_fronthaul
  /opt/prplmesh/bin/ieee1905_transport
  /opt/prplmesh/scripts/prplmesh_utils.sh: status
  OK wlan0 radio agent operational
  OK wlan1 radio agent operational
  executing operational test using bml

Check that prplmesh is in operational state:

  $ R logger -t cram "Check that prplmesh is in operational state"
  $ R "/opt/prplmesh/bin/beerocks_cli -c bml_conn_map" | egrep '(wlan|OK)' | sed -E "s/.*: (wlan[0-9.]+) .*/\1/" | LC_ALL=C sort
  bml_connect: return value is: BML_RET_OK, Success status
  bml_disconnect: return value is: BML_RET_OK, Success status
  bml_nw_map_query: return value is: BML_RET_OK, Success status
  wlan0
  wlan0.0
  wlan0.1
  wlan1
  wlan1.0
  wlan1.1

@@ -0,0 +1,35 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Add DMZ host:

  $ printf "\
  > ubus-cli Firewall.X_Prpl_DMZ+{Alias='test'}
  > ubus-cli Firewall.X_Prpl_DMZ.test.SourceInterface=eth2
  > ubus-cli Firewall.X_Prpl_DMZ.test.DestinationIPAddress=192.168.1.186
  > ubus-cli Firewall.X_Prpl_DMZ.test.Enable=1
  > " > /tmp/cram
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/cram)'" > /dev/null; sleep 1

Check that correct firewall rules were created:

  $ R "iptables -t nat -L POSTROUTING_DMZ | grep 186 | sort"
  SNAT all -- prplOS.lan 192.168.1.186 to:10.0.0.2

  $ R "iptables -t nat -L PREROUTING_DMZ | grep 186 | sort"
  DNAT all -- anywhere 10.0.0.2 to:192.168.1.186

  $ R "iptables -L FORWARD_DMZ | grep 186 | sort"
  ACCEPT all -- 192.168.1.186 anywhere
  ACCEPT all -- anywhere 192.168.1.186

Remove DMZ host:

  $ script --command "ssh -t root@$TARGET_LAN_IP ubus-cli Firewall.X_Prpl_DMZ.test-" > /dev/null; sleep 1

Check that firewall rules are gone:

  $ R "iptables -t nat -L POSTROUTING_DMZ | grep 186 | sort"
  $ R "iptables -t nat -L PREROUTING_DMZ | grep 186 | sort"
  $ R "iptables -L FORWARD_DMZ | grep 186 | sort"

@@ -0,0 +1,55 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Get initial state of bridges:

  $ R "brctl show | cut -d$'\t' -f1,4-"
  bridge name\tSTP enabled\tinterfaces (esc)
  br-guest\tno (esc)
  br-lan\tno\t\tlan0 (esc)
  \t\t\t\t\tlan1 (esc)
  \t\t\t\t\tlan2 (esc)
  \t\t\t\t\tlan3 (esc)
  \t\t\t\t\tlan4 (esc)

Remove lan4 from LAN bridge and add it to the Guest bridge:

  $ printf ' \
  > ubus-cli Bridging.Bridge.lan.Port.LAN4-\n
  > ubus-cli Bridging.Bridge.guest.Port.+{Name="lan4", Alias="LAN4"}\n
  > ubus-cli Bridging.Bridge.guest.Port.LAN4.Enable=1\n
  > ' > /tmp/run
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run)'" > /dev/null
  $ sleep 5

Check that lan4 is added to Guest bridge:

  $ R "brctl show | cut -d$'\t' -f1,4-"
  bridge name\tSTP enabled\tinterfaces (esc)
  br-guest\tno\t\tlan4 (esc)
  br-lan\tno\t\tlan0 (esc)
  \t\t\t\t\tlan1 (esc)
  \t\t\t\t\tlan2 (esc)
  \t\t\t\t\tlan3 (esc)

Remove lan4 from the Guest bridge and add it back to the LAN bridge:

  $ printf '\
  > ubus-cli Bridging.Bridge.guest.Port.LAN4-\n
  > ubus-cli Bridging.Bridge.lan.Port.+{Name="lan4", Alias="LAN4"}\n
  > ubus-cli Bridging.Bridge.lan.Port.LAN4.Enable=1\n
  > ' > /tmp/run
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run)'" > /dev/null
  $ sleep 5

Check for initial state of bridges again:

  $ R "brctl show | cut -d$'\t' -f1,4-"
  bridge name\tSTP enabled\tinterfaces (esc)
  br-guest\tno (esc)
  br-lan\tno\t\tlan0 (esc)
  \t\t\t\t\tlan1 (esc)
  \t\t\t\t\tlan2 (esc)
  \t\t\t\t\tlan3 (esc)
  \t\t\t\t\tlan4 (esc)

@@ -0,0 +1,96 @@
Create R alias:

  $ alias R="${CRAM_REMOTE_COMMAND:-}"

Check that Sandbox is not configured properly:

  $ R "ubus -S call Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1 _get"
  [4]

  $ R "ubus -S call Cthulhu.Config _get | jsonfilter -e @[*].DhcpCommand"

Configure Sandbox:

  $ cat > /tmp/run-sandbox <<EOF
  > ubus-cli Cthulhu.Config.DhcpCommand=\"udhcpc -r 192.168.1.200 -i\"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Type="Veth"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.+
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.Bridge="br-lan"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.Interface="eth0"
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.EnableDhcp=1
  > ubus-cli Cthulhu.Sandbox.Instances.1.NetworkNS.Enable=1
  > ubus-cli "Cthulhu.Sandbox.start(SandboxId=\"generic\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-sandbox)'" > /dev/null
  $ sleep 10

Check that Sandbox was configured properly:

  $ R "ubus -S call Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1 _get"
  {"Cthulhu.Sandbox.Instances.1.NetworkNS.Interfaces.1.":{"EnableDhcp":true,"Interface":"eth0","Bridge":"br-lan"}}

  $ R "ubus -S call Cthulhu.Config _get | jsonfilter -e @[*].DhcpCommand"
  udhcpc -r 192.168.1.200 -i

Install testing prplOS container v1:

  $ cat > /tmp/run-container <<EOF
  > ubus-cli "SoftwareModules.InstallDU(URL=\"docker://registry.gitlab.com/prpl-foundation/prplos/prplos/prplos-testing-container-mvebu-cortexa9:v1\", UUID=\"prplos-testing\", ExecutionEnvRef=\"generic\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-container)'" > /dev/null

Check that prplOS container v1 is running:

  $ sleep 30

  $ R "ubus -S call Cthulhu.Container.Instances.1 _get | jsonfilter -e @[*].Status -e @[*].Bundle -e @[*].BundleVersion -e @[*].ContainerId -e @[*].Alias | sort"
  Running
  cpe-prplos-testing
  prplos-testing
  prplos/prplos-testing-container-mvebu-cortexa9
  v1

  $ R "ssh -y root@192.168.1.200 'cat /etc/container-version' 2> /dev/null"
  1

Update to prplOS container v2:

  $ cat > /tmp/run-container <<EOF
  > ubus-cli "SoftwareModules.DeploymentUnit.cpe-prplos-testing.Update(URL=\"docker://registry.gitlab.com/prpl-foundation/prplos/prplos/prplos-testing-container-mvebu-cortexa9:v2\")"
  > EOF
  $ script --command "ssh -t root@$TARGET_LAN_IP '$(cat /tmp/run-container)'" > /dev/null

Check that prplOS container v2 is running:

  $ sleep 30

  $ R "ubus -S call Cthulhu.Container.Instances.2 _get | jsonfilter -e @[*].Status -e @[*].Bundle -e @[*].BundleVersion -e @[*].ContainerId -e @[*].Alias | sort"
  Running
  cpe-prplos-testing
  prplos-testing
  prplos/prplos-testing-container-mvebu-cortexa9
  v2

  $ R "ssh -y root@192.168.1.200 'cat /etc/container-version' 2> /dev/null"
  2

Uninstall prplOS testing container:

  $ script --command "ssh -t root@$TARGET_LAN_IP 'ubus-cli SoftwareModules.DeploymentUnit.cpe-prplos-testing.Uninstall\(\)'" > /dev/null; sleep 5

Check that prplOS container is not running:

  $ R "ubus -S call Cthulhu.Container.Instances.2 _get"
  [4]

Check that Rlyeh has no container images:

  $ R "ubus -S call Rlyeh.Images _get"
  {"Rlyeh.Images.":{}}

Check that container image is gone from the filesystem as well:

  $ R "ls -al /usr/share/rlyeh/images/prplos"
  ls: /usr/share/rlyeh/images/prplos: No such file or directory
  [1]

11
.testbed/README.md
Normal file
@@ -0,0 +1,11 @@
# Content

This directory contains the bits needed for the testbed.

## cdrouter

Contains CDRouter configurations and test package definitions for runtime testing on real devices using [CDRouter](https://www.qacafe.com/test-solutions/cdrouter-benefits/cdrouter-technology/).

## labgrid

Provides content used during runtime testing on real devices using the [labgrid](https://labgrid.readthedocs.io/en/latest/) Python testing framework.
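
The cram suites added above drive an already-booted device over SSH; the R alias they define simply prefixes every command with whatever is stored in CRAM_REMOTE_COMMAND. A minimal local invocation might look like the following sketch, assuming the device answers as root on 192.168.1.1 (the address and the chosen suite are assumptions, not taken from this commit):

  export CRAM_REMOTE_COMMAND="ssh root@192.168.1.1"
  export TARGET_LAN_IP=192.168.1.1
  cram .gitlab/tests/cram/generic/*.t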

2677
.testbed/cdrouter/configurations/generic
Normal file
File diff suppressed because it is too large
BIN
.testbed/cdrouter/packages/CDRouter-IPv6-Top-100.gz
Normal file
Binary file not shown.
BIN
.testbed/cdrouter/packages/CDRouter-TR-069.gz
Normal file
Binary file not shown.
BIN
.testbed/cdrouter/packages/CDRouter-Top-100.gz
Normal file
Binary file not shown.
257
.testbed/labgrid/default.yaml
Normal file
@@ -0,0 +1,257 @@
targets:

  nec-wx3000hp-nand:
    resources:
      RawSerialPort:
        port: "/dev/nec-wx3000hp"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 2
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 2
      SerialDriver: {}
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        username: kingbanik
      SmallUBootDriver:
        prompt: 'GRX500 #'
        boot_secret: ''
        boot_expression: 'Hit any key to stop autoboot'
        init_command_timeout: 60
        init_commands:
          - setenv bootdelay 5
          - setenv serverip 192.168.1.2
          - setenv ipaddr 192.168.1.1
          - setenv bootargs debug earlyprintk console=ttyLTQ0,115200
          - setenv fullimage AX3000_1600_ETH_11AXUCI_ASURADA-squashfs-fullimage.img
          - ubi part system_sw
          - ubi remove rootfs_data
          - ubi create rootfs_data 0x2000000 dynamic
          - run update_fullimage
          - run run_bootcore
          - ubi read $loadaddr $kernel_vol
          - run flashargs addmisc
      UBootStrategy: {}

  nec-wx3000hp-firstboot:
    resources:
      RawSerialPort:
        port: "/dev/nec-wx3000hp"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 2
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 2
      SerialDriver: {}
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        username: kingbanik
      SmallUBootDriver:
        prompt: 'GRX500 #'
        boot_secret: ''
        boot_expression: 'Hit any key to stop autoboot'
        init_command_timeout: 60
        init_commands:
          - setenv bootargs debug earlyprintk console=ttyLTQ0,115200
          - ubi part system_sw
          - ubi remove rootfs_data
          - ubi create rootfs_data 0x2000000 dynamic
          - ubi read $loadaddr $kernel_vol
          - run flashargs addmisc
      UBootStrategy: {}

  netgear-rax40-nand:
    resources:
      RawSerialPort:
        port: "/dev/netgear-rax40"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 1
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 1
      SerialDriver: {}
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        username: kingbanik
      SmallUBootDriver:
        prompt: 'GRX500 #'
        boot_secret: ''
        boot_expression: 'Hit any key to stop autoboot'
        init_command_timeout: 60
        init_commands:
          - setenv bootdelay 0
          - setenv serverip 192.168.1.2
          - setenv ipaddr 192.168.1.1
          - setenv bootargs debug earlyprintk console=ttyLTQ0,115200
          - setenv fullimage NETGEAR_RAX40-squashfs-fullimage.img
          - ubi part system_sw
          - ubi remove rootfs_data
          - ubi create rootfs_data 0x2000000 dynamic 5
          - run update_fullimage
          - run run_bootcore
          - ubi read $(loadaddr) $(kernel_vol)
          - run flashargs addmisc
      UBootStrategy: {}

  netgear-rax40-initramfs:
    resources:
      RawSerialPort:
        port: "/dev/ttyS1"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 1
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 1
      SerialDriver: {}
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        username: kingbanik
      SmallUBootDriver:
        prompt: 'GRX500 #'
        boot_secret: ''
        boot_expression: 'Hit any key to stop autoboot'
        init_command_timeout: 60
        init_commands:
          - setenv bootdelay 0
          - setenv serverip 192.168.1.2
          - setenv ipaddr 192.168.1.1
          - setenv bootargs debug earlyprintk console=ttyLTQ0,115200
          - tftpboot 0x8f500000 NETGEAR_RAX40-initramfs-kernel.bin
      UBootStrategy: {}

  tplink-c6v2-initramfs:
    resources:
      RawSerialPort:
        port: "/dev/tplink-c6v2"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 0
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 0
      SerialDriver: {}
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        await_login_timeout: 15
        username: kingbanik
      SmallUBootDriver:
        prompt: 'ath> '
        boot_expression: 'Hit any key to stop autoboot'
        init_commands:
          - setenv bootdelay 0
          - setenv serverip 192.168.1.2
          - setenv ipaddr 192.168.1.1
          - setenv bootargs debug earlyprintk console=ttyS0,115200
          - ping 192.168.1.2; ping 192.168.1.2; ping 192.168.1.2
          - tftpboot 0x81000000 openwrt-ath79-generic-tplink_archer-c6-v2-initramfs-kernel.bin
      UBootStrategy: {}

  glinet-b1300-initramfs:
    resources:
      RawSerialPort:
        port: "/dev/glinet-b1300"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 1
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 1
      SerialDriver: {}
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        await_login_timeout: 15
        username: kingbanik
      UBootDriver:
        prompt: '\(IPQ40xx\) # '
        password: 'gl'
        password_prompt: 'Hit "gl" key to stop booting'
        init_commands:
          - setenv bootdelay 0
          - setenv serverip 192.168.1.2
          - setenv ipaddr 192.168.1.1
        boot_command_delay: 5
        boot_timeout: 90
        boot_command: tftpboot 0x88000000 openwrt-ipq40xx-generic-glinet_gl-b1300-initramfs-fit-uImage.itb && bootm 0x88000000
      UBootStrategy: {}

  glinet-b1300-nor:
    resources:
      RawSerialPort:
        port: "/dev/glinet-b1300"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 1
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 1
      SerialDriver: {}
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        await_login_timeout: 15
        username: kingbanik
      UBootDriver:
        prompt: '\(IPQ40xx\) # '
        password: 'gl'
        password_prompt: 'Hit "gl" key to stop booting'
        init_commands:
          - setenv bootdelay 0
          - setenv serverip 192.168.1.2
          - setenv ipaddr 192.168.1.1
        boot_command_delay: 5
        boot_timeout: 180
        boot_command: tftpboot 0x84000000 openwrt-ipq40xx-generic-glinet_gl-b1300-squashfs-sysupgrade.bin && sf probe && sf erase 0x180000 0x1e80000 && sf write 0x84000000 0x180000 $filesize && bootipq
      UBootStrategy: {}

  # needs patched u-boot for emmc r/w https://lists.denx.de/pipermail/u-boot/2021-February/441964.html
  turris-omnia-emmc:
    resources:
      RawSerialPort:
        port: "/dev/turris-omnia"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 0
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 0
      SerialDriver:
        txdelay: 0.01
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        await_login_timeout: 15
        username: kingbanik
      UBootDriver:
        prompt: '=> '
        init_commands:
          - setenv bootargs earlyprintk console=ttyS0,115200
          - setenv set_blkcnt 'setexpr blkcnt ${filesize} + 0x7ffff && setexpr blkcnt ${blkcnt} / 0x80000 && setexpr blkcnt ${blkcnt} * 0x400'
          - dhcp ${kernel_addr_r} openwrt-mvebu-cortexa9-cznic_turris-omnia-sysupgrade.img
        boot_command: run set_blkcnt && mmc dev 0 0 && mmc erase 0 ${blkcnt} && mmc write ${kernel_addr_r} 0 ${blkcnt} && run bootcmd
      UBootStrategy: {}

  turris-omnia-initramfs:
    resources:
      RawSerialPort:
        port: "/dev/turris-omnia"
    drivers:
      ExternalPowerDriver:
        cmd_on: ssh root@uart-relay.testbed.vpn.true.cz power_on 0
        cmd_off: ssh root@uart-relay.testbed.vpn.true.cz power_off 0
      SerialDriver:
        txdelay: 0.01
      ShellDriver:
        console_ready: Please press Enter to activate this console.
        prompt: 'root@[\w()]+:[^ ]+ '
        login_prompt: built-in shell (ash)
        await_login_timeout: 15
        username: kingbanik
      UBootDriver:
        prompt: '=> '
        init_commands:
          - setenv bootargs earlyprintk console=ttyS0,115200
          - dhcp ${kernel_addr_r} openwrt-mvebu-cortexa9-cznic_turris-omnia-initramfs-kernel.bin
        boot_command: bootm ${kernel_addr_r}
      UBootStrategy: {}
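
The file above is a plain labgrid environment description: each entry under targets names one board/boot-mode combination and lists the resources (serial port) and drivers (power switching, U-Boot and shell access) labgrid should instantiate for it. A rough sketch of consuming such an environment through labgrid's pytest plugin, assuming labgrid is installed and a pytest suite using its target fixture exists (the test path is illustrative only):

  pip install labgrid
  pytest --lg-env .testbed/labgrid/default.yaml tests/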