Compare commits

..

2 Commits

Author SHA1 Message Date
Janusz Dziedzic
004bbc3d3b uci-defaults: l2mode dedicated wanport
Signed-off-by: Janusz Dziedzic <janusz.dziedzic@iopsys.eu>
2025-06-05 10:54:53 +02:00
Janusz Dziedzic
1847d587a3 map-agent: update map-dynamic-backhaul
- fix wpa_cli network_id
- add/remove eth uplink from bridge
- don't require dedicated wanport to be part of bridge
2025-06-05 10:54:53 +02:00
143 changed files with 2167 additions and 6492 deletions
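The map-agent commit above touches three behaviours: resolving the wpa_cli network id, moving the ethernet uplink into and out of the bridge, and dropping the requirement that the dedicated wanport already be a bridge member. The sketch below is illustrative only and is not the map-agent implementation; the interface names (wlan0, eth1, br-lan) are placeholders.

#!/bin/sh
# Hypothetical interface names; adjust for the target board.
STA_IFACE="wlan0"
ETH_UPLINK="eth1"
AL_BRIDGE="br-lan"

# Look up the first configured network id known to wpa_supplicant.
# list_networks prints a header line, then rows with the id in column 1.
NET_ID="$(wpa_cli -i "$STA_IFACE" list_networks 2>/dev/null | awk 'NR>1 {print $1; exit}')"
echo "wpa_cli network_id: ${NET_ID:-none}"

# Add or remove the ethernet uplink from the bridge with iproute2.
if [ "$1" = "add" ]; then
    ip link set dev "$ETH_UPLINK" master "$AL_BRIDGE"
else
    ip link set dev "$ETH_UPLINK" nomaster
fi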

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=bbfdm
PKG_VERSION:=1.16.6.2
PKG_VERSION:=1.15.31
USE_LOCAL:=0
ifneq ($(USE_LOCAL),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/bbfdm.git
PKG_SOURCE_VERSION:=aa480554461c82e6f6f44ee6c23108d3e44fce21
PKG_SOURCE_VERSION:=cb405b3b881d055f5eb94d0e244ccadda7558973
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -15,13 +15,9 @@
]
},
"dhcp_refresh": {
"if_operator": "OR",
"if" : [
{
"event": "host"
},
{
"event": "wifi.dataelements.Associated"
}
],
"then" : [

View File

@@ -5,13 +5,14 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=bridgemngr
PKG_VERSION:=1.0.18.2
PKG_VERSION:=1.0.16
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/bridgemngr
PKG_SOURCE_VERSION:=71ed529be038392071b0399bcfe9d46e89d3cb46
PKG_SOURCE_VERSION:=3947502c81bab2b4e4f5b2c0f8ead41be99488a2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -6,12 +6,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=decollector
PKG_VERSION:=6.2.1.8
PKG_VERSION:=6.2.1.6
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=b7e294d7c610adfd80cf40a0628c189695dc5156
PKG_SOURCE_VERSION:=2f2beb0e9f27ebebe47cfcc982b8c1a5460e987b
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/decollector.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip

View File

@@ -2,13 +2,13 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=dectmngr
PKG_RELEASE:=3
PKG_VERSION:=3.7.10
PKG_VERSION:=3.7.7
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/dectmngr.git
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=1f851980a6ba616df54f79930225f8bcd563b711
PKG_SOURCE_VERSION:=289a91b3e7f221f16c976efd147bd4b203420b41
PKG_MIRROR_HASH:=skip
endif

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=dnsmngr
PKG_VERSION:=1.0.18
PKG_VERSION:=1.0.17
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/dnsmngr.git
PKG_SOURCE_VERSION:=80fa147e6f1f0d9c1a62a62a693ff3adaef45363
PKG_SOURCE_VERSION:=2ceb76e98cf23a8d52ab3f464d38d62385311a87
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -8,13 +8,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=dslmngr
PKG_VERSION:=1.2.10
PKG_VERSION:=1.2.9
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/hal/dslmngr.git
PKG_SOURCE_VERSION:=8fb4093b4d26b3cb06603e110d424005e33cf5d6
PKG_SOURCE_VERSION:=5340cb31f759301f5aca3fd848fc3a63b0b4663f
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MAINTAINER:=Rahul Thakur <rahul.thakur@iopsys.eu>
PKG_MIRROR_HASH:=skip
@@ -30,8 +30,6 @@ include ../bbfdm/bbfdm.mk
ifeq ($(CONFIG_TARGET_brcmbca),y)
TARGET_PLATFORM=BROADCOM
else ifneq ($(CONFIG_TARGET_airoha),)
TARGET_PLATFORM=AIROHA
else
$(info Unexpected CONFIG_TARGET)
endif
@@ -43,7 +41,7 @@ define Package/dslmngr
CATEGORY:=Utilities
TITLE:=XDSL status and configuration utility
DEPENDS:=+libdsl +libuci +libubox +ubus +libpthread +libnl-genl +libeasy
DEPENDS+=+libbbfdm-api +libbbfdm-ubus +dm-service +TARGET_airoha:br2684ctl
DEPENDS+=+libbbfdm-api +libbbfdm-ubus +dm-service
endef
define Package/dslmngr/description
@@ -77,10 +75,7 @@ define Package/dslmngr/install
$(CP) ./files/common/* $(1)/
ifeq ($(CONFIG_TARGET_brcmbca),y)
$(CP) ./files/broadcom/* $(1)/
else ifneq ($(CONFIG_TARGET_airoha),)
$(CP) ./files/airoha/* $(1)/
endif
$(INSTALL_DIR) $(1)/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/dslmngr $(1)/sbin/
$(BBFDM_REGISTER_SERVICES) ./bbfdm_service.json $(1) $(PKG_NAME)

View File

@@ -1,26 +0,0 @@
#!/bin/sh /etc/rc.common
START=99
STOP=99
USE_PROCD=1
start_service() {
readonly need_xdsl="$(jsonfilter -i /etc/board.json -e @.dsl)"
[ -f /etc/board.json ] || return 0
if [ "$need_xdsl" != "" ]; then
procd_open_instance xdsl_wan
procd_set_param command /sbin/xdsl_wan
procd_set_param respawn
procd_close_instance
fi
}
boot() {
: # boot-up is handled from 'hsm' application
}
service_triggers()
{
procd_add_reload_trigger "dsl"
}

View File

@@ -1,28 +0,0 @@
#!/bin/sh
. /lib/functions.sh
xtm_remove_devices() {
:
}
xtm_create_devices() {
:
}
xdsl_configure() {
# Support bridged WAN interface
ebtables --concurrent -t broute -D BROUTING -p 0xbeed -j DROP 2> /dev/null
ebtables --concurrent -t broute -I BROUTING -p 0xbeed -j DROP
}
xdsl_stop() {
return 0
}
xdsl_init() {
readonly need_xdsl="$(jsonfilter -i /etc/board.json -e @.dsl)"
[ "$need_xdsl" = "" ] && exit 0
echo "Starting DSL"
}

View File

@@ -1,270 +0,0 @@
#!/bin/sh
source "/lib/functions.sh"
source "/lib/functions/network.sh"
source "/lib/functions/system.sh"
PREVLINK=""
LINK=""
LINKSPEED=""
PREVWANMODE=""
WANMODE=""
CONFIGURED=0
CINDEX=0
WANPORT="$(jsonfilter -i /etc/board.json -e @.network.wan.device)"
delete_atm_device() {
/userfs/bin/blapi_cmd xdsl atm_delete_interface $CINDEX
CINDEX=$((CINDEX+1))
}
delete_atm_devices() {
CINDEX=0
config_load dsl
config_foreach delete_atm_device atm-device
}
configure_atm_device() {
local vpi vci encapsulation qos_class pcr mbs scr
local encap qos ethmac
config_get vpi $1 vpi "8"
config_get vci $1 vci "35"
config_get encapsulation $1 encapsulation "llc"
config_get qos_class $1 qos_class "ubr"
config_get pcr $1 pcr "0"
config_get mbs $1 mbs "0"
config_get scr $1 scr "0"
case $encapsulation in # llc, vcmux
vcmux)
encap="1483 Bridged IP VC-Mux"
;;
*)
encap="1483 Bridged IP LLC"
;;
esac
ethmac="$(echo -n "$(macaddr_add 02:AA:BB:01:23:40 $((CINDEX+2)))" | sed 's#:##g')"
/userfs/bin/blapi_cmd xdsl create_device $CINDEX ATM "" "$ethmac"
/userfs/bin/blapi_cmd xdsl atm_create_interface $CINDEX ATM "$qos_class" "$encap" "$vpi" "$vci" "$pcr" "$scr" "$mbs" 0
CINDEX=$((CINDEX+1))
}
create_atm_devices() {
delete_atm_devices
CINDEX=0
config_foreach configure_atm_device atm-device
}
configure_line() {
local mode profile bitswap sra us0 sesdrop sos roc ginp gvector mod prof
local adsl1_flag=0 issue2_flag=0 Glite_flag=0 adsl2_flag=0 adsl2p_flag=0 vdsl2_flag=0
local pro_8a_flag=0 pro_8b_flag=0 pro_8c_flag=0 pro_8d_flag=0 pro_12a_flag=0 pro_12b_flag=0 pro_17a_flag=0 pro_30a_flag=0 pro_35b_flag=0
config_get mode $1 mode "vdsl2"
config_get profile $1 profile "35b"
config_get bitswap $1 bitswap "1"
config_get sra $1 sra "1"
config_get us0 $1 us0 "1"
config_get sos $1 sos "0"
config_get roc $1 roc "0"
config_get ginp $1 ginp "1"
config_get gvector $1 gvector "1"
for mod in $mode; do
[ "$mod" = "gdmt" ] && adsl1_flag=1
[ "$mod" = "glite" ] && Glite_flag=1
[ "$mod" = "t1413" ] && issue2_flag=1
[ "$mod" = "adsl2" ] && adsl2_flag=1
[ "$mod" = "adsl2p" ] && adsl2p_flag=1
[ "$mod" = "vdsl2" ] && vdsl2_flag=1
done
for prof in $profile; do
[ "$prof" = "8a" ] && pro_8a_flag=1
[ "$prof" = "8b" ] && pro_8b_flag=1
[ "$prof" = "8c" ] && pro_8c_flag=1
[ "$prof" = "8d" ] && pro_8d_flag=1
[ "$prof" = "12a" ] && pro_12a_flag=1
[ "$prof" = "12b" ] && pro_12b_flag=1
[ "$prof" = "17a" ] && pro_17a_flag=1
[ "$prof" = "30a" ] && pro_30a_flag=1
[ "$prof" = "35b" ] && pro_35b_flag=1
done
/userfs/bin/blapi_cmd xdsl set_adsl_profile "$pro_8a_flag" "$pro_8b_flag" "$pro_8c_flag" "$pro_8d_flag" "$pro_12a_flag" "$pro_12b_flag" "$pro_17a_flag" "$pro_30a_flag" "$pro_35b_flag"
/userfs/bin/blapi_cmd xdsl set_adsl_mode "$adsl1_flag" "$issue2_flag" "$Glite_flag" "$adsl2_flag" "$adsl2p_flag" "$vdsl2_flag"
/userfs/bin/blapi_cmd xdsl set_adsl_gvector "$((!gvector))"
/userfs/bin/blapi_cmd xdsl set_adsl_ginp "$((!ginp))"
/userfs/bin/blapi_cmd xdsl set_adsl_sos_roc "$((!sos))" "$((!roc))"
/userfs/bin/blapi_cmd xdsl set_adsl_us0 "$((!us0))"
/userfs/bin/blapi_cmd xdsl set_adsl_sra "$((!sra))"
/userfs/bin/blapi_cmd xdsl set_adsl_bitswap "$((!bitswap))"
CONFIGURED=1
}
configure_lines() {
config_load dsl
config_foreach configure_line dsl-line
}
call_wan_hotplug() {
# initializations
local updown="$1"
local ethwan="$2"
# ethernet hotplugs expect LINK and PORT environment variables set
env -i LINK="$updown" PORT="$ethwan" /sbin/hotplug-call ethernet
}
if [ "$WANPORT" = "ae_wan" -a -f /proc/device-tree/ae_wan/wan-dsl ]; then
/etc/init.d/br2684ctl stop
else
/etc/init.d/br2684ctl start
fi
# Wait for nas0 interface to come up.
while [ "$(devstatus "$WANPORT" | jsonfilter -e @.up)" != "true" ]; do
sleep 1
done
while [ true ]; do
LINK="$(awk '/ADSL link status:/{print $4}' /proc/tc3162/adsl_stats)"
[ \( "$LINK" = "down" -o "$LINK" = "up" \) ] && break
sleep 1
done
sleep 2
/userfs/bin/blapi_cmd xdsl set_adsl_sysvid "26 00 47 4E 58 53 00 00" # GNXS vendor id
/userfs/bin/blapi_cmd xdsl set_adsl_version "$(ubus call fwbank dump | jsonfilter -e "@.bank[@.active=true].swver" | cut -f1 -d'_' | cut -f1 -d'-' | hexdump -e '11/1 "%02x " "\n"' | head -n1)"
/userfs/bin/blapi_cmd xdsl set_power_up_down 1
/userfs/bin/blapi_cmd xdsl set_power_up_down 0
sleep 1
while [ true ]; do
LINK="$(awk '/ADSL link status:/{print $4}' /proc/tc3162/adsl_stats)"
if [ "$LINK" != "$PREVLINK" -a \( "$LINK" = "down" -o "$LINK" = "up" \) ]; then
if [ "$LINK" = "down" ]; then
if [ ! -s /tmp/qos/wan_link_shape_rate ]; then
rm -rf /tmp/qos/wan_link_shape_rate
rm -rf /tmp/qos/wan_link_speed
/usr/sbin/qos-uplink-bandwidth
fi
[ "$CONFIGURED" -eq 0 ] && configure_lines # Needs to be done once the slave SoC is in down state and we've not been able to auto-sync.
if [ -n "$WANMODE" ]; then
if [ "$WANMODE" = "PTM" ]; then
/userfs/bin/blapi_cmd xdsl ptm_do_reset_sequence 0 1
else
delete_atm_devices
fi
fi
call_wan_hotplug "down" "$WANPORT"
else
CONFIGURED=1
WANMODE="$(awk '/TPSTC type:/{print $4}' /proc/tc3162/adsl_stats)"
if [ "$WANMODE" != "$PREVWANMODE" ]; then
OLDWANPORT="$WANPORT"
network_defer_device "$OLDWANPORT"
if [ -f /proc/device-tree/ae_wan/wan-dsl ]; then
WANPORT="ae_wan"
else
WANPORT="nas10"
fi
if [ "$WANMODE" = "PTM" ]; then
/etc/init.d/br2684ctl stop
delete_atm_devices
/userfs/bin/blapi_cmd system set_wan_mode 1
/userfs/bin/blapi_cmd xdsl reload_ko 2
/userfs/bin/blapi_cmd xdsl ptm_do_reset_sequence 1 2
# Set extended TPID for PTM packet flow
sys memwl 1FB50000 81001839
# VLAN TPID - VLAN
sys memwl 1FB50F18 8100
ifconfig ${WANPORT} mtu 1500
else
/userfs/bin/blapi_cmd system set_wan_mode 0
/userfs/bin/blapi_cmd xdsl reload_ko 1
/etc/init.d/br2684ctl start
# Set extended TPID for ATM packet flow
sys memwl 1FB50000 884C1839
# VLAN TPID - MPOA
sys memwl 1FB50F18 884C
ifconfig ${WANPORT} mtu 1982
ifconfig ${WANPORT} down up
OLDWANPORT="$WANPORT"
ATMINDEX="$(cat /sys/class/atm/TSARM*/atmindex | tail -n1 2> /dev/null)"
WANPORT="nas$((ATMINDEX))"
fi
if [ "$OLDWANPORT" != "$WANPORT" ]; then
call_wan_hotplug "down" "$OLDWANPORT"
FILES="$(grep "$OLDWANPORT" /etc/config/* | cut -f1 -d: | uniq | cut -f4 -d/ | xargs)"
for FILE in $FILES; do
sed -i -e "s#${OLDWANPORT}#${WANPORT}#g" "/etc/config/${FILE}"
"/etc/init.d/${FILE}" restart
done
else
/etc/init.d/network restart
fi
ifconfig ${OLDWANPORT} down up
fi
if [ "$WANMODE" = "PTM" ]; then
: # ToDo
else
create_atm_devices
fi
call_wan_hotplug "up" "$WANPORT"
PREVWANMODE="$WANMODE"
if [ ! -s /tmp/qos/wan_link_shape_rate ]; then
LINKSPEED="$(awk '/far-end interleaved channel bit rate/{print $6}' /proc/tc3162/adsl_stats)"
LINKSPEED=$((LINKSPEED))
if [ "$LINKSPEED" -eq 0 ]; then
LINKSPEED="$(awk '/far-end fast channel bit rate/{print $6}' /proc/tc3162/adsl_stats)"
LINKSPEED=$((LINKSPEED))
fi
if [ "$LINKSPEED" -ne 0 ]; then
mkdir -p /tmp/qos
touch /tmp/qos/wan_link_shape_rate
/userfs/bin/qosrule discpline Rate uplink-bandwidth ${LINKSPEED}
hw_nat -! > /dev/null 2>&1
else
rm -rf /tmp/qos/wan_link_speed
/usr/sbin/qos-uplink-bandwidth
fi
fi
fi
# Toggle link state
network_defer_device "$WANPORT"
network_ready_device "$WANPORT"
# We are only interested in the transition from init -> up/down and between up and down.
# Since we poll the status via in-band signaling, packets might get lost and the /procfs file can be empty.
# We don't want to handle that state, as it would toggle the link and trigger a re-setup that is totally unnecessary and unwanted.
PREVLINK="$LINK"
fi
sleep 5
done
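The removed monitor loop above keys everything off simple awk field extractions from /proc/tc3162/adsl_stats. A tiny worked example of that parsing, using a made-up sample line rather than a real procfs dump; the TPSTC/WANMODE and bit-rate extractions in the script follow the same field-number pattern.

#!/bin/sh
# Made-up sample line; on the target the data comes from /proc/tc3162/adsl_stats.
line='ADSL link status: up'
LINK="$(printf '%s\n' "$line" | awk '/ADSL link status:/{print $4}')"
echo "$LINK"    # prints: up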

View File

@@ -25,7 +25,6 @@ config dsl-line line
list profile 12b
list profile 17a
list profile 30a
list profile 35b
option bitswap 1
option sra 1
option us0 1 # VDSL2 only

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ethmngr
PKG_VERSION:=3.0.8
PKG_VERSION:=3.0.7
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/hal/ethmngr.git
PKG_SOURCE_VERSION:=c73e5b15718ca40b2740bbe6151dfbb2bcca16df
PKG_SOURCE_VERSION:=171cf63d972c6fa81b97281531e457a0967c16c7
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -2,8 +2,6 @@
. /lib/functions.sh
ZONE_NAME_FILE="/tmp/service_fw_zone"
log() {
echo "${@}"|logger -t firewall.service -p info
}
@@ -19,37 +17,6 @@ exec_cmd() {
return 0
}
collect_zone_name() {
local name network
config_get name "${1}" name ""
if [ -z "${name}" ]; then
return
fi
config_get network "${1}" network ""
for i in ${network}; do
var="${i}_zone"
echo "${var}=${name}" >> "${ZONE_NAME_FILE}"
done
}
load_zone_names() {
rm -f "${ZONE_NAME_FILE}"
config_foreach collect_zone_name zone
}
get_firewall_zone() {
if [ ! -f "${ZONE_NAME_FILE}" ]; then
echo ""
return
fi
var="${1}_zone="
name="$(cat ${ZONE_NAME_FILE} | grep ${var} | head -n 1 | cut -d'=' -f 2)"
echo "${name}"
}
add_iptable_rule() {
chain_name=$1
protocol=$2
@@ -168,14 +135,9 @@ add_service() {
fi
action=$(echo "${target}" | tr a-z A-Z)
zone_name="$(get_firewall_zone ${interface})"
if [ -z "${zone_name}" ]; then
log "Rule can not be added without zone name for interface ${interface}"
return
fi
chain_name="zone_${zone_name}_input"
chain_name="zone_${interface}_input"
res=0
count=$(echo "${proto}" | sed -n "/-1/p" | wc -l)
if [ "${count}" -eq 0 ]; then
@@ -198,9 +160,4 @@ add_service() {
}
config_load firewall
load_zone_names
config_foreach add_service "service"
rm -f "${ZONE_NAME_FILE}"

View File

@@ -5,16 +5,15 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=fluent-bit
PKG_VERSION:=4.0.4
PKG_VERSION:=4.0.2
PKG_RELEASE:=$(AUTORELEASE)
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/fluent/fluent-bit.git
PKG_SOURCE_VERSION=v$(PKG_VERSION)
PKG_SOURCE:=$(PKG_NAME)-v$(PKG_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
PKG_SOURCE_URL_FILE:=v$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/fluent/fluent-bit/archive/refs/tags/
PKG_HASH:=aa0577ba7251081c8d5398b2a905b5b0585bb657ca13b39a5e12931437516f08
endif
PKG_LICENSE:=Apache-2.0
@@ -66,9 +65,9 @@ CMAKE_OPTIONS += \
-DFLB_IN_DISK=Yes \
-DFLB_IN_EXEC=Yes \
-DFLB_IN_HEAD=Yes \
-DFLB_IN_KMSG=Yes \
-DFLB_IN_TAIL=Yes \
-DFLB_IN_FORWARD=No \
-DFLB_IN_KMSG=No \
-DFLB_IN_PROC=No \
-DFLB_IN_RANDOM=No \
-DFLB_IN_SERIAL=No \

View File

@@ -9,10 +9,6 @@
tag syslog
path /dev/log
[INPUT]
name kmsg
tag kernel
[OUTPUT]
name null
match *

View File

@@ -0,0 +1,45 @@
diff --git a/plugins/out_file/file.c b/plugins/out_file/file.c
index 2e47c9666..95d28e438 100644
--- a/plugins/out_file/file.c
+++ b/plugins/out_file/file.c
@@ -27,6 +27,7 @@
#include <msgpack.h>
#include <stdio.h>
+#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -55,6 +56,7 @@ struct flb_file_conf {
int csv_column_names;
int mkdir;
struct flb_output_instance *ins;
+ char hostname[256];
};
static char *check_delimiter(const char *str)
@@ -141,6 +143,9 @@ static int cb_file_init(struct flb_output_instance *ins,
}
}
+ if (gethostname(ctx->hostname, sizeof(ctx->hostname)) != 0)
+ snprintf(ctx->hostname, sizeof(ctx->hostname), "%s", "localhost");
+
tmp = flb_output_get_property("delimiter", ins);
ret_str = check_delimiter(tmp);
if (ret_str != NULL) {
@@ -233,12 +238,8 @@ static int template_output_write(struct flb_file_conf *ctx,
int i;
msgpack_object_kv *kv;
- /*
- * Right now we treat "{time}" specially and fill the placeholder
- * with the metadata timestamp (formatted as float).
- */
- if (!strncmp(key, "time", size)) {
- fprintf(fp, "%f", flb_time_to_double(tm));
+ if (!strncmp(key, "hostname", size)) {
+ fprintf(fp, "%s", ctx->hostname);
return 0;
}

View File

@@ -1,27 +0,0 @@
diff --git a/plugins/out_file/file.c b/plugins/out_file/file.c
index 77baf6be8..04c519d5a 100644
--- a/plugins/out_file/file.c
+++ b/plugins/out_file/file.c
@@ -238,10 +238,20 @@ static int template_output_write(struct flb_file_conf *ctx,
/*
* Right now we treat "{time}" specially and fill the placeholder
- * with the metadata timestamp (formatted as float).
+ * with the metadata timestamp.
*/
if (!strncmp(key, "time", size)) {
- fprintf(fp, "%f", flb_time_to_double(tm));
+ struct tm tm_local;
+ char buf[32];
+ if (localtime_r(&tm->tm.tv_sec, &tm_local) == NULL) {
+ flb_plg_error(ctx->ins, "localtime_r failed");
+ return -1;
+ }
+ if (strftime(buf, sizeof(buf), "%b %d %H:%M:%S", &tm_local) == 0) {
+ flb_plg_error(ctx->ins, "strftime failed");
+ return -1;
+ }
+ fputs(buf, fp);
return 0;
}

View File

@@ -1,73 +0,0 @@
diff --git a/plugins/in_kmsg/in_kmsg.c b/plugins/in_kmsg/in_kmsg.c
index cd5c4cd17..15f105451 100644
--- a/plugins/in_kmsg/in_kmsg.c
+++ b/plugins/in_kmsg/in_kmsg.c
@@ -36,7 +36,6 @@
#include <sys/stat.h>
#include <sys/time.h>
#include <inttypes.h>
-#include <time.h>
#include "in_kmsg.h"
@@ -123,12 +122,17 @@ static inline int process_line(const char *line,
ctx->buffer_id++;
errno = 0;
- val = strtol(p, &end, 10);
- if ((errno == ERANGE && (val == INT_MAX || val == INT_MIN))
+ val = strtoul(p, &end, 10);
+ if ((errno == ERANGE && val == ULONG_MAX)
|| (errno != 0 && val == 0)) {
goto fail;
}
+ /* ensure something was consumed */
+ if (end == p) {
+ goto fail;
+ }
+
/* Priority */
priority = FLB_KLOG_PRI(val);
@@ -144,24 +148,35 @@ static inline int process_line(const char *line,
}
p++;
- val = strtoul(p, &end, 10);
- if ((errno == ERANGE && (val == INT_MAX || val == INT_MIN))
+ val = strtoull(p, &end, 10);
+ if ((errno == ERANGE && val == ULLONG_MAX)
|| (errno != 0 && val == 0)) {
goto fail;
}
+ /* make sure strtoull consumed something */
+ /* after the sequence number, the next char must be ',' */
+ if (end == p || *end != ',') {
+ goto fail;
+ }
+
sequence = val;
p = ++end;
/* Timestamp */
- val = strtoul(p, &end, 10);
- if ((errno == ERANGE && (val == INT_MAX || val == INT_MIN))
+ val = strtoull(p, &end, 10);
+ if ((errno == ERANGE && val == ULLONG_MAX)
|| (errno != 0 && val == 0)) {
goto fail;
}
+ /* ensure something was consumed */
+ if (end == p) {
+ goto fail;
+ }
+
tv.tv_sec = val/1000000;
- tv.tv_usec = val - (tv.tv_sec * 1000000);
+ tv.tv_usec = val - ((uint64_t)tv.tv_sec * 1000000);
flb_time_set(&ts, ctx->boot_time.tv_sec + tv.tv_sec, tv.tv_usec * 1000);

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=hostmngr
PKG_VERSION:=1.3.3
PKG_VERSION:=1.2.20
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=fee5bd0067fc1f30498bc2b81e893d170796b459
PKG_SOURCE_VERSION:=3948618fa8fa23a0ddc51632b0036dbd08e27696
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/hostmngr.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip

View File

@@ -8,13 +8,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=icwmp
PKG_VERSION:=9.9.9.3
PKG_VERSION:=9.9.8
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/icwmp.git
PKG_SOURCE_VERSION:=55a64d756afd6249b8bb4cccf2cdaa7e1aa05f91
PKG_SOURCE_VERSION:=13c8bed356f9a87621fbb873f250f4fb7d2a7f96
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -88,7 +88,6 @@ define Package/icwmp/install
$(INSTALL_BIN) ./files/etc/uci-defaults/90-cwmpfirewall $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/95-set-random-inform-time $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/85-migrate-gw-info $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/999-cwmp-conn-config $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/icwmpd/vendor_log.sh $(1)/etc/icwmpd/vendor_log.sh
$(INSTALL_BIN) ./files/etc/icwmpd/firewall.cwmp $(1)/etc/icwmpd/firewall.cwmp
$(INSTALL_DATA) ./files/lib/upgrade/keep.d/icwmp $(1)/lib/upgrade/keep.d/icwmp

View File

@@ -28,7 +28,6 @@ config cpe 'cpe'
option log_severity 'WARNING'
option log_file_name '/var/log/icwmpd.log'
option log_max_size '102400'
option bind_retries '5'
option userid '' #$OUI-$SER
option passwd ''
option port '7547'

View File

@@ -1,44 +1,14 @@
#!/bin/sh
. /lib/functions.sh
ZONE_NAME_FILE="/tmp/cwmp_fw_zone"
#created by the icwmp package
log() {
echo "${@}"|logger -t firewall.cwmp -p info
}
collect_zone_name() {
local name network
config_get name "${1}" name ""
if [ -z "${name}" ]; then
return
fi
config_get network "${1}" network ""
for i in ${network}; do
var="${i}_zone"
echo "${var}=${name}" >> "${ZONE_NAME_FILE}"
done
}
load_zone_names() {
rm -f "${ZONE_NAME_FILE}"
config_load firewall
config_foreach collect_zone_name zone
}
get_firewall_zone() {
if [ ! -f "${ZONE_NAME_FILE}" ]; then
echo ""
return
fi
var="${1}_zone="
name="$(cat ${ZONE_NAME_FILE} | grep ${var} | head -n 1 | cut -d'=' -f 2)"
echo "${name}"
zone="$(uci show firewall|grep network|grep -w "${1}"|cut -d. -f 2)"
zone="${zone:-wan}" # defaults to wan zone
echo "$zone"
}
cleanup_upstream_rules() {
@@ -199,6 +169,4 @@ configure_connection_req_rules() {
fi
}
load_zone_names
configure_connection_req_rules "$@"
rm -f "${ZONE_NAME_FILE}"
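The replacement above resolves the firewall zone for an interface directly from UCI instead of caching it in a temporary file. A rough illustration of what that pipeline yields, assuming a named zone section in /etc/config/firewall (section name and output below are illustrative, not taken from this changeset):

# Assuming /etc/config/firewall contains:
#   config zone 'wan_zone'
#       option network 'wan'
# the lookup used above resolves the section id like this:
uci show firewall | grep network | grep -w "wan" | cut -d. -f 2
# -> wan_zone   (the script falls back to "wan" when nothing matches)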

View File

@@ -16,6 +16,79 @@ log() {
echo "${@}"|logger -t cwmp.init -p info
}
regenerate_ssl_link() {
local cert_dir
cert_dir="${1%/}"
if [ -f "${cert_dir}" ]; then
return 0
fi
# do not generate the c_rehash if it's the system default cert path
# ca-certificate package already generates c_rehash on compilation
[ ! -d "${cert_dir}" ] || [ "${cert_dir}" = "/etc/ssl/certs" ] && return 0
generate_links() {
local file_type="$1"
local files="${cert_dir}"/*."${file_type}"
for cfile in ${files}; do
if [ -f "${cfile}" ]; then
rehash="$(openssl x509 -hash -noout -in "${cfile}")"
if [ ! -f "${cert_dir}/${rehash}.0" ]; then
log "Generating c_rehash for ${cfile}=>${rehash}.0"
ln -s "${cfile}" "${cert_dir}/${rehash}.0"
fi
fi
done
}
generate_links "pem"
}
enable_dhcp_option43() {
local wan="${1}"
### Ask for DHCP Option 43 only if CWMP is enabled ###
local reqopts="$(uci -q get network."${wan}".reqopts)"
local proto="$(uci -q get network."${wan}".proto)"
local newreqopts=""
local option43_present=0
for ropt in $reqopts; do
case $ropt in
43) option43_present=1 ;;
*) ;;
esac
done
if [ ${option43_present} -eq 1 ]; then
return;
fi
newreqopts="$reqopts 43"
if [ "${proto}" = "dhcp" ]; then
uci -q set network."${wan}".reqopts="$newreqopts"
uci commit network
ubus call network reload
fi
}
set_vendor_id() {
local wan="${1}"
local proto="$(uci -q get network."${wan}".proto)"
if [ "${proto}" = "dhcp" ]; then
vendorid="$(uci -q get network."${wan}".vendorid)"
if [ -z "${vendorid}" ]; then
uci -q set network."${wan}".vendorid="dslforum.org"
ubus call uci commit '{"config":"network"}'
elif [[ $vendorid != *"dslforum.org"* ]]; then
uci -q set network."${wan}".vendorid="${vendorid},dslforum.org"
ubus call uci commit '{"config":"network"}'
fi
fi
}
wait_for_resolvfile() {
local time=$1
local tm=1
@@ -138,6 +211,28 @@ validate_defaults() {
}
boot() {
local dhcp_discovery wan_interface skip_dhcp_boot_options
config_load cwmp
config_get wan_interface cpe default_wan_interface "wan"
config_get dhcp_discovery acs dhcp_discovery "0"
config_get skip_dhcp_boot_options acs skip_dhcp_boot_options "0"
if [ "${dhcp_discovery}" = "enable" ] || [ "${dhcp_discovery}" = "1" ]; then
if [ "${skip_dhcp_boot_options}" -ne 1 ]; then
# Set dhcp option 43 if not already configured
enable_dhcp_option43 "${wan_interface}"
# Set dhcp option 60
set_vendor_id "${wan_interface}"
fi
fi
config_get ssl_capath acs ssl_capath
if [ -n "${ssl_capath}" ]; then
regenerate_ssl_link "${ssl_capath}"
fi
# Copy backup data so that if it restart latter on, it gets the info
copy_cwmp_etc_files_to_varstate
mkdir -p /var/run/icwmpd/
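regenerate_ssl_link above recreates the c_rehash style <subject-hash>.0 links that OpenSSL expects when a CA directory (CApath) is used. A hedged sketch of the same operation for a single certificate, assuming a file at /tmp/acs-ca.pem (path is a placeholder):

# Compute the subject hash OpenSSL uses for CA directory lookups ...
hash="$(openssl x509 -hash -noout -in /tmp/acs-ca.pem)"
# ... and create the link name the CApath lookup expects.
ln -sf /tmp/acs-ca.pem "/tmp/${hash}.0"
ls -l "/tmp/${hash}.0"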

View File

@@ -1,107 +0,0 @@
#!/bin/sh
. /lib/functions.sh
log() {
echo "${@}"|logger -t cwmp.defaults -p info
}
set_vendor_id() {
local wan="${1}"
local proto="$(uci -q get network."${wan}".proto)"
if [ "${proto}" = "dhcp" ]; then
vendorid="$(uci -q get network."${wan}".vendorid)"
if [ -z "${vendorid}" ]; then
uci -q set network."${wan}".vendorid="dslforum.org"
elif [[ $vendorid != *"dslforum.org"* ]]; then
uci -q set network."${wan}".vendorid="${vendorid},dslforum.org"
fi
fi
}
enable_dhcp_option43() {
local wan="${1}"
local reqopts="$(uci -q get network."${wan}".reqopts)"
local proto="$(uci -q get network."${wan}".proto)"
local newreqopts=""
local option43_present=0
for ropt in $reqopts; do
case $ropt in
43) option43_present=1 ;;
*) ;;
esac
done
if [ ${option43_present} -eq 1 ]; then
return;
fi
newreqopts="$reqopts 43"
if [ "${proto}" = "dhcp" ]; then
uci -q set network."${wan}".reqopts="$newreqopts"
fi
}
regenerate_ssl_link() {
local cert_dir
cert_dir="${1%/}"
if [ -f "${cert_dir}" ]; then
return 0
fi
# do not generate the c_rehash if it's the system default cert path
# ca-certificate package already generates c_rehash on compilation
[ ! -d "${cert_dir}" ] || [ "${cert_dir}" = "/etc/ssl/certs" ] && return 0
generate_links() {
local file_type="$1"
local files="${cert_dir}"/*."${file_type}"
for cfile in ${files}; do
if [ -f "${cfile}" ]; then
rehash="$(openssl x509 -hash -noout -in "${cfile}")"
if [ ! -f "${cert_dir}/${rehash}.0" ]; then
log "Generating c_rehash for ${cfile}=>${rehash}.0"
ln -s "${cfile}" "${cert_dir}/${rehash}.0"
fi
fi
done
}
generate_links "pem"
}
configure_dhcp_discovery() {
local dhcp_discovery wan_interface skip_dhcp_boot_options
config_load cwmp
config_get wan_interface cpe default_wan_interface "wan"
config_get dhcp_discovery acs dhcp_discovery "0"
config_get skip_dhcp_boot_options acs skip_dhcp_boot_options "0"
if [ "${dhcp_discovery}" = "enable" ] || [ "${dhcp_discovery}" = "1" ]; then
if [ "${skip_dhcp_boot_options}" -ne 1 ]; then
# Set dhcp option 43 if not already configured
enable_dhcp_option43 "${wan_interface}"
# Set dhcp option 60
set_vendor_id "${wan_interface}"
fi
fi
}
configure_ssl_path() {
local ssl_capath
config_load cwmp
config_get ssl_capath acs ssl_capath
if [ -n "${ssl_capath}" ]; then
regenerate_ssl_link "${ssl_capath}"
fi
}
configure_dhcp_discovery
configure_ssl_path
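The removed uci-defaults script above duplicates logic that now lives in the icwmp init boot() shown earlier; its net effect is two UCI edits on the WAN interface. An illustrative verification, assuming the interface is named wan and uses DHCP:

# Illustrative check after the defaults have run on a DHCP WAN:
uci -q get network.wan.proto       # dhcp
uci -q get network.wan.reqopts     # now includes "43" (request vendor-specific info)
uci -q get network.wan.vendorid    # dslforum.org, sent as DHCP option 60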

View File

@@ -1,17 +1,16 @@
#
# Copyright (C) 2020-2024 IOPSYS Software Solutions AB
# Copyright (C) 2025 Genexis Sweden AB
#
include $(TOPDIR)/rules.mk
PKG_NAME:=ieee1905
PKG_VERSION:=8.7.33
PKG_VERSION:=8.7.8
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=f28f1c04cae008d7d6448ba02b992506af28448c
PKG_SOURCE_VERSION:=9c507bfb7f45fad81097262f05dc7cd11760e6b0
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/ieee1905.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip
@@ -106,7 +105,6 @@ TARGET_CFLAGS += -DHAS_UBUS
ifeq ($(CONFIG_IEEE1905_BUILD_TR181_PLUGIN),y)
TARGET_CFLAGS += -DBUILD_TR181_PLUGIN
TARGET_CFLAGS += -DBBF_VENDOR_PREFIX=\\\"$(CONFIG_BBF_VENDOR_PREFIX)\\\"
endif
MAKE_FLAGS += \

View File

@@ -12,7 +12,6 @@ config al-iface
list ifname 'br-lan'
list ifname '/eth.*'
list ifname '/wl.*'
list ifname '/ra.*'
list ifname '/wds.*'
# ap sections are auto-generated/overwritten during onboarding

View File

@@ -1,9 +1,10 @@
#!/bin/sh
. /lib/functions/system.sh
BMAC=$(get_mac_label)
BMAC=$(db -q get hw.board.basemac)
BMAC=${BMAC//:/}
BMAC=${BMAC// /}
BMAC=$(printf "%12.12X" $((0x$BMAC)))
[ "$BMAC" == "" ] && exit 1

View File

@@ -4,7 +4,7 @@ PKG_NAME:=iopsys-analytics
PKG_RELEASE:=$(COMMITCOUNT)
PKG_LICENSE:=PROPRIETARY
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=25e32ac5a860aec6e53e3449565b71595073e014
PKG_SOURCE_VERSION:=00189cea0a78b7a30dbfdd363b6d8e836437d1bc
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/iopsys-analytics.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip
@@ -31,9 +31,8 @@ define Package/$(PKG_NAME)
+@PACKAGE_COLLECTD_ENCRYPTED_NETWORK \
# remote syslog
DEPENDS+= \
+@PACKAGE_syslog-ng:SYSLOGNG_LOGROTATE \
+PACKAGE_fluent-bit:logrotate \
+@DMCLI_REMOTE_CONNECTION
+syslog-ng \
+@SYSLOGNG_LOGROTATE \
endef
@@ -45,13 +44,7 @@ endef
Build/Compile=
define Package/$(PKG_NAME)/install
$(CP) -r $(PKG_BUILD_DIR)/files/common/* $(1)/
ifneq ($(CONFIG_PACKAGE_fluent-bit),)
$(CP) -r $(PKG_BUILD_DIR)/files/fluent-bit/* $(1)/
endif
ifneq ($(CONFIG_PACKAGE_syslog-ng),)
$(CP) -r $(PKG_BUILD_DIR)/files/syslog-ng/* $(1)/
endif
$(CP) -r $(PKG_BUILD_DIR)/files/* $(1)/
endef
$(eval $(call BuildPackage,$(PKG_NAME)))

View File

@@ -6,13 +6,13 @@ include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=ipt-trigger
PKG_VERSION:=1.0.3
PKG_VERSION:=1.0.2
PKG_LICENSE:=GPL-2.0
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=ac1beae4794f99533b28db7d0e6e80f4c268a3e8
PKG_SOURCE_VERSION:=4f3d4427403e0a9be7653c1b92907ae8ae5f21ae
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/ipt-trigger.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip

View File

@@ -1,55 +0,0 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=json-schema-validator
PKG_VERSION:=2.3.0
PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/pboettch/json-schema-validator.git
PKG_SOURCE_VERSION:=$(PKG_VERSION)
PKG_MIRROR_HASH:=skip
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1
PKG_BUILD_DEPENDS:=nlohmann-json
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
CMAKE_OPTIONS += \
-DJSON_VALIDATOR_BUILD_TESTS=OFF \
-DJSON_VALIDATOR_BUILD_EXAMPLES=OFF \
-DJSON_VALIDATOR_INSTALL=ON \
-DJSON_VALIDATOR_SHARED_LIBS=OFF
define Package/json-schema-validator
SECTION:=libs
CATEGORY:=Libraries
TITLE:=JSON Schema Validator for nlohmann::json
URL:=https://github.com/pboettch/json-schema-validator
DEPENDS:=+libstdcpp +nlohmann-json
endef
define Package/json-schema-validator/description
A JSON Schema Validator for Modern C++ using nlohmann/json.
endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include/nlohmann
$(CP) $(PKG_BUILD_DIR)/src/nlohmann/json-schema.hpp $(1)/usr/include/nlohmann/
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/libnlohmann_json_schema_validator*.a $(1)/usr/lib/ 2>/dev/null || true
$(CP) $(PKG_BUILD_DIR)/libnlohmann_json_schema_validator*.so* $(1)/usr/lib/ 2>/dev/null || true
endef
define Package/json-schema-validator/install
true
endef
$(eval $(call BuildPackage,json-schema-validator))

View File

@@ -1,44 +0,0 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=jsonval
PKG_VERSION:=1.0.0
PKG_RELEASE:=1
PKG_LICENSE:=MIT
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
include $(INCLUDE_DIR)/package.mk
define Package/jsonval
SECTION:=utils
CATEGORY:=Utilities
TITLE:=Command-line JSON Schema Validator
DEPENDS:=+nlohmann-json +json-schema-validator +libstdcpp
endef
define Package/jsonval/description
A small CLI tool to validate JSON files against a schema using json-schema-validator and nlohmann/json.
endef
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
cp -r ./src/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
$(TARGET_CXX) \
$(TARGET_CXXFLAGS) -std=c++17 \
-I$(STAGING_DIR)/usr/include \
-I$(STAGING_DIR)/usr/include/nlohmann \
-L$(STAGING_DIR)/usr/lib \
$(PKG_BUILD_DIR)/main.cpp \
-o $(PKG_BUILD_DIR)/jsonval \
-lnlohmann_json_schema_validator
endef
define Package/jsonval/install
$(INSTALL_DIR) $(1)/usr/bin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/jsonval $(1)/usr/bin/
endef
$(eval $(call BuildPackage,jsonval))

View File

@@ -1,64 +0,0 @@
#include <iostream>
#include <fstream>
#include <string>
#include <nlohmann/json.hpp>
#include <nlohmann/json-schema.hpp>
using nlohmann::json;
using nlohmann::json_schema::json_validator;
using namespace std;
void print_usage(const string& prog_name) {
cerr << "Usage: " << prog_name << " -s <schema.json> -j <data.json>" << endl;
}
int main(int argc, char* argv[]) {
string schema_path, data_path;
// Simple argument parsing
for (int i = 1; i < argc; ++i) {
string arg = argv[i];
if ((arg == "-s" || arg == "--schema") && i + 1 < argc) {
schema_path = argv[++i];
} else if ((arg == "-j" || arg == "--json") && i + 1 < argc) {
data_path = argv[++i];
} else {
print_usage(argv[0]);
return 1;
}
}
if (schema_path.empty() || data_path.empty()) {
print_usage(argv[0]);
return 1;
}
ifstream schema_file(schema_path);
ifstream data_file(data_path);
if (!schema_file.is_open() || !data_file.is_open()) {
cerr << "Error: Could not open one or both files." << endl;
return 2;
}
json schema, document;
try {
schema_file >> schema;
data_file >> document;
} catch (const json::parse_error& e) {
cerr << "Parse error: " << e.what() << endl;
return 3;
}
try {
json_validator validator;
validator.set_root_schema(schema);
validator.validate(document);
cout << "Valid" << endl;
} catch (const std::exception& e) {
cerr << "Validation failed: " << e.what() << endl;
return 4;
}
return 0;
}
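For the record, the removed jsonval tool was driven as shown by print_usage above; file names below are placeholders.

# Placeholder paths; exit codes match main() above:
#   0 valid, 1 bad usage, 2 cannot open file, 3 JSON parse error, 4 validation failure
jsonval -s /etc/jsonval/schema.json -j /tmp/input.json && echo "document is valid"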

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=libdpp
PKG_VERSION:=2.1.2
PKG_VERSION:=2.1.1
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=5f1184c52be19f3bfd3bc7e9bc582ef09b0a2b1c
PKG_SOURCE_VERSION:=6024efd3db9dd490c07465ea9b0c15120063165c
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/libdpp.git
PKG_MAINTAINER:=Jakob Olsson <jakob.olsson@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=libdsl
PKG_VERSION:=7.3.2
PKG_VERSION:=7.2.100
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=20875ec79fcc7c546c2f1253c867d6afbc8bff75
PKG_SOURCE_URL:=https://dev.iopsys.eu/hal/libdsl.git
PKG_SOURCE_VERSION:=1aa9c40f9503311652e562617b1e15533257adcc
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip
endif
@@ -37,9 +37,6 @@ else ifeq ($(CONFIG_TARGET_x86),y)
else ifeq ($(CONFIG_TARGET_armvirt),y)
TARGET_PLATFORM=TEST
TARGET_CFLAGS +=-DIOPSYS_TEST
else ifeq ($(CONFIG_TARGET_airoha),y)
TARGET_PLATFORM=AIROHA
TARGET_CFLAGS +=-DIOPSYS_AIROHA
endif
TARGET_CFLAGS += \
@@ -60,7 +57,7 @@ define Package/libdsl
SUBMENU:=IOPSYS HAL libs
MENU:=1
TITLE:= xDSL library (libdsl)
DEPENDS+=TARGET_brcmbca:bcm963xx-bsp +TARGET_airoha:libeasy
DEPENDS+=TARGET_brcmbca:bcm963xx-bsp
endef
define Package/libdsl/description

View File

@@ -8,7 +8,7 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=libvoice-airoha
PKG_RELEASE:=1
PKG_VERSION:=1.1.7
PKG_VERSION:=1.1.3
PKG_LICENSE:=PROPRIETARY
PKG_LICENSE_FILES:=LICENSE
@@ -17,7 +17,7 @@ LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/$(PKG_NAME).git
PKG_SOURCE_VERSION:=3a30086a68a3409f0396acb01380f91daabf7a2f
PKG_SOURCE_VERSION:=f4ffa38b77e20f9e2a6b6ffd5b2bf83cddb6bffc
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=libwifi
PKG_VERSION:=7.13.6
PKG_VERSION:=7.13.0
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=d17ad7415a821e95618c4739507bf129df3fdebf
PKG_SOURCE_VERSION:=c3bd60c0b23a6bdb2280693bd7784d25a70090ca
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/libwifi.git
PKG_MAINTAINER:=Anjan Chanda <anjan.chanda@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz

View File

@@ -1,5 +1,4 @@
if PACKAGE_logmngr
choice
prompt "Select backend for syslog management"
default LOGMNGR_BACKEND_FLUENTBIT
@@ -32,5 +31,4 @@ config LOGMNGR_VENDOR_LOG_FILE
default y
help
It adds support for Device.DeviceInfo.VendorLogFile. Object.
endif

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=logmngr
PKG_VERSION:=1.1.4
PKG_VERSION:=1.0.16
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/system/logmngr.git
PKG_SOURCE_VERSION:=62441fdfe14a39bff8fff7c62307bd7b54d7240f
PKG_SOURCE_VERSION:=1561b71a2225af737db9f091204247ab4e141abb
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -29,6 +29,7 @@ define Package/logmngr
CATEGORY:=Utilities
TITLE:=Logging Manager
DEPENDS:=+LOGMNGR_BACKEND_FLUENTBIT:fluent-bit
DEPENDS+=+@LOGMNGR_BACKEND_FLUENTBIT:BUSYBOX_CONFIG_KLOGD
DEPENDS+=+LOGMNGR_BACKEND_SYSLOG_NG:syslog-ng
DEPENDS+=+LOGMNGR_LOGROTATE:logrotate
DEPENDS+=+libbbfdm-api +libbbfdm-ubus +dm-service
@@ -52,35 +53,31 @@ endif
define Package/logmngr/install
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_BIN) ./files/logmngr.init $(1)/etc/init.d/logmngr
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_BIN) ./files/10-logmngr_config_generate $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/init.d/logmngr $(1)/etc/init.d/
$(INSTALL_DATA) ./files/etc/config/logmngr $(1)/etc/config/
$(INSTALL_DATA) ./files/etc/uci-defaults/10-logmngr_config_migrate $(1)/etc/uci-defaults/
$(BBFDM_INSTALL_MS_PLUGIN) $(PKG_BUILD_DIR)/bbf_plugin/libbbfsyslog.so $(1) core 10
# Install logmngr service backend
$(INSTALL_DIR) $(1)/lib/logmngr
ifeq ($(CONFIG_LOGMNGR_BACKEND_FLUENTBIT),y)
$(INSTALL_DATA) ./files/lib/logmngr/fluent-bit.sh $(1)/lib/logmngr/
$(INSTALL_DIR) $(1)/usr/libexec
$(INSTALL_BIN) ./files/logmngr-klogd $(1)/usr/libexec/
$(INSTALL_DIR) $(1)/sbin
$(INSTALL_BIN) ./files/logread $(1)/sbin/
$(INSTALL_DATA) ./files/lib/logmngr/fluent-bit.sh $(1)/lib/logmngr/
else ifeq ($(CONFIG_LOGMNGR_BACKEND_SYSLOG_NG),y)
endif
ifeq ($(CONFIG_LOGMNGR_BACKEND_SYSLOG_NG),y)
$(INSTALL_DATA) ./files/lib/logmngr/syslog-ng.sh $(1)/lib/logmngr/
endif
$(BBFDM_INSTALL_MS_PLUGIN) $(PKG_BUILD_DIR)/bbf_plugin/libbbfsyslog.so $(1) core 10
ifeq ($(CONFIG_LOGMNGR_LOGROTATE),y)
$(INSTALL_BIN) ./files/11-logmngr_logrotate_config_generate $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/lib/logmngr/logrotate.sh $(1)/lib/logmngr/
$(INSTALL_DATA) ./files/etc/uci-defaults/11-logmngr_logrotate_syslog $(1)/etc/uci-defaults/
$(BBFDM_INSTALL_MS_PLUGIN) $(PKG_BUILD_DIR)/bbf_plugin/libbbflogrotate.so $(1) sysmngr 11
endif
ifeq ($(CONFIG_LOGMNGR_VENDOR_LOG_FILE),y)
$(BBFDM_INSTALL_MS_PLUGIN) $(PKG_BUILD_DIR)/bbf_plugin/libbbfvendorlog.so $(1) sysmngr 12
endif
endef
$(eval $(call BuildPackage,logmngr))

View File

@@ -0,0 +1,26 @@
#!/bin/sh
if uci -q get logmngr.@globals[0] >/dev/null; then
# return if there is any valid content
exit 0
else
rm -f /etc/config/logmngr
fi
touch /etc/config/logmngr
uci set logmngr.globals=globals
uci set logmngr.globals.enable=1
uci set logmngr.a1=action
uci set logmngr.a1.name="ac1"
uci set logmngr.lf1=log_file
uci set logmngr.lf1.enable=1
uci set logmngr.lf1.action="ac1"
uci set logmngr.lf1.file="/var/log/messages"
uci set logmngr.lr1=log_remote
uci set logmngr.lr1.enable=0
uci set logmngr.lr1.action="ac1"
uci set logmngr.lr1.port="514"

View File

@@ -1,7 +1,7 @@
#!/bin/sh
# Adds a default log rotate policy if none exists
if uci -q get logmngr.lro1 >/dev/null; then
if uci -q get logmngr.@log_rotate[0] >/dev/null; then
# return if there is any valid content
exit 0
fi

View File

@@ -1,26 +0,0 @@
config globals 'globals'
option enable '1'
config source 'default_source'
option name 'default_source'
option system_messages '1'
option kernel_messages '1'
config template 'default_template'
option name 'default_template'
option expression '{time} {hostname} {ident}: {message}'
config action 'default_action'
option name 'default_action'
list source 'default_source'
option template 'default_template'
config log_file 'lf1'
option enable '1'
option action 'default_action'
option file '/var/log/messages'
config log_remote 'lr1'
option enable '0'
option action 'default_action'
option port '514'

View File

@@ -1,36 +0,0 @@
#!/bin/sh
# check if this is a new type UCI or old type UCI
if ! uci -q get logmngr.default_source > /dev/null; then
uci -q set logmngr.default_source=source
uci -q set logmngr.default_source.name='default_source'
uci -q set logmngr.default_source.system_messages='1'
uci -q set logmngr.default_source.kernel_messages='1'
fi
if ! uci -q get logmngr.default_template > /dev/null; then
uci -q set logmngr.default_template=template
uci -q set logmngr.default_template.name='default_template'
uci -q set logmngr.default_template.expression='{time} {hostname} {ident}: {message}'
fi
if uci -q get logmngr.a1 >/dev/null; then
uci -q rename logmngr.a1='default_action'
uci -q set logmngr.default_action.name='default_action'
uci -q set logmngr.default_action.template='default_template'
uci -q delete logmngr.default_action.source
uci -q add_list logmngr.default_action.source='default_source'
fi
if uci -q get logmngr.lf1 >/dev/null; then
uci -q rename logmngr.lf1='default_logfile'
uci -q set logmngr.default_logfile.action='default_action'
fi
if uci -q get logmngr.lr1 >/dev/null; then
uci -q rename logmngr.lr1='default_logremote'
uci -q set logmngr.default_logremote.action='default_action'
fi
exit 0

View File

@@ -5,38 +5,7 @@
CONF_FILE=/etc/fluent-bit/fluent-bit.conf
TMP_CONF_FILE=/tmp/fluent-bit/fluent-bit.conf
FLUENT_BIT_CONF_DIR=/etc/fluent-bit/conf.d
PROCESSED_SYSLOG_TAGS=""
PROCESSED_KMSG_TAGS=""
# check if syslog source section is already processed
# and add it to the list of processed source sections
syslog_tag_already_processed() {
local tag="$1"
for t in $PROCESSED_SYSLOG_TAGS; do
[ "$t" = "$tag" ] && return 0
done
PROCESSED_SYSLOG_TAGS="$tag $PROCESSED_SYSLOG_TAGS"
return 1
}
# check if kmsg source section is already processed
# and add it to the list of processed source sections
# two separate functions used because we want to populate
# appropriate PROCESSED variable
kmsg_tag_already_processed() {
local tag="$1"
for t in $PROCESSED_KMSG_TAGS; do
[ "$t" = "$tag" ] && return 0
done
PROCESSED_KMSG_TAGS="$tag $PROCESSED_KMSG_TAGS"
return 1
}
CONF_DIR=/etc/fluent-bit/conf.d
append_conf() {
echo "$*" >> ${TMP_CONF_FILE}
@@ -46,281 +15,210 @@ create_config_file() {
mkdir -p /tmp/fluent-bit
rm -f ${TMP_CONF_FILE}
touch ${TMP_CONF_FILE}
# include all files placed in FLUENT_BIT_CONF_DIR directory
# include all files placed in CONF_DIR directory
# fluent-bit does not support using directory in include directive
# also, if no file is found then fluent-bit aborts
# so only add include if any file is present in the FLUENT_BIT_CONF_DIR
if [ -d "$FLUENT_BIT_CONF_DIR" ] && [ "$(ls -A "$FLUENT_BIT_CONF_DIR")" ]; then
append_conf "@INCLUDE ${FLUENT_BIT_CONF_DIR}/*"
# so only add include if any file is present in the CONF_DIR
if [ -d "$CONF_DIR" ] && [ "$(ls -A "$CONF_DIR")" ]; then
echo "@INCLUDE ${CONF_DIR}/*" >> ${TMP_CONF_FILE}
fi
append_conf ""
echo "" >> ${TMP_CONF_FILE}
}
create_service_section() {
# the service section of the fluent-bit.conf file has hardcoded values,
# no need to lookup any uci section to configure this section
append_conf "[SERVICE]"
append_conf " flush 1"
append_conf " daemon off"
append_conf " log_level info"
append_conf " coro_stack_size 24576"
append_conf " parsers_file /etc/fluent-bit/parsers.conf"
append_conf " hot_reload on"
append_conf ""
}
create_default_filters() {
append_conf "[FILTER]"
append_conf " name modify"
append_conf " match KM*"
append_conf " add ident kernel"
append_conf " rename msg message"
append_conf ""
append_conf "[FILTER]"
append_conf " name sysinfo"
append_conf " match *"
append_conf " hostname_key hostname"
append_conf ""
echo "[SERVICE]" >> ${TMP_CONF_FILE}
echo " flush 1" >> ${TMP_CONF_FILE}
echo " daemon off" >> ${TMP_CONF_FILE}
echo " log_level info" >> ${TMP_CONF_FILE}
echo " coro_stack_size 24576" >> ${TMP_CONF_FILE}
echo " parsers_file /etc/fluent-bit/parsers.conf" >> ${TMP_CONF_FILE}
echo " hot_reload on" >> ${TMP_CONF_FILE}
echo "" >> ${TMP_CONF_FILE}
}
create_input_section() {
local tag="$1"
[ -z "$tag" ] && return
# check if this source section has already been processed
syslog_tag_already_processed "$tag" && return
append_conf "[INPUT]"
append_conf " name syslog"
append_conf " unix_perm 0666"
append_conf " tag $tag"
append_conf " path /dev/log"
append_conf ""
# the input in our case is always syslog, hence, this section of the
# fluent-bit.conf file has hardcoded values as well that do not depend
# on any uci value
echo "[INPUT]" >> ${TMP_CONF_FILE}
echo " name syslog" >> ${TMP_CONF_FILE}
echo " tag $tag" >> ${TMP_CONF_FILE}
echo " path /dev/log" >> ${TMP_CONF_FILE}
echo "" >> ${TMP_CONF_FILE}
}
populate_allowed_logs() {
local facility_level sev_level
local section="$1"
generate_facility_regex() {
local facility_level=$1
local pri=0
[ -z "$section" ] && return
# reset
match_pattern=""
facilities=""
all_facilities=0
kern_facility=0
severities=""
sev_compare=1
sev_action=0
# read config
config_get match_pattern $section pattern_match
config_get facility_level $section facility_level
config_get sev_level $section severity_level
config_get sev_compare $section severity_compare 1
config_get sev_action $section severity_action 0
# normalize facilities
if [ -n "$facility_level" ]; then
for f in $facility_level; do
if [ "$f" = "24" ]; then
all_facilities=1
# xargs is used to convert from new line separated numbers to space separated numbers
facilities="$(seq 0 23 | xargs)"
break
fi
if [ "$f" = "0" ]; then
kern_facility=1
fi
done
if [ "$all_facilities" -eq 0 ]; then
facilities="$facility_level"
fi
else
# default to "all facilities" when unset
all_facilities=1
facilities="$(seq 0 23 | xargs)"
fi
# normalize severities
case "$sev_level" in
8) # all severities
severities="$(seq 0 7 | xargs)"
;;
9) # none
severities="none"
;;
"") # unset, treat as "all"
severities="$(seq 0 7 | xargs)"
;;
*)
if [ "$sev_compare" = "0" ]; then
# equal
severities="$sev_level"
else
# equal or higher
severities="$(seq 0 $sev_level | xargs)"
fi
;;
esac
}
create_filter_section() {
local match_regex="$1"
local pattern="$2"
[ -z "$match_regex" ] && return
append_conf "[FILTER]"
append_conf " name grep"
append_conf " match_regex $match_regex"
# we need "logical_op or" only in non-pattern sections
if [ "$pattern" = "0" ]; then
append_conf " logical_op or" # handle multiple filters
fi
}
create_kmsg_input_section() {
local tag="$1"
local max_sev=7
[ -z "$tag" ] && return
kmsg_tag_already_processed "$tag" && return
if [ -c "/dev/kmsg" ]; then
append_conf "[INPUT]"
append_conf " name kmsg"
append_conf " tag $tag"
# check kern facility (0)
if [ "$all_facilities" -eq 1 ] || [ "$kern_facility" -eq 1 ]; then
if [ "$severities" != "none" ]; then
# severity filtering
# only EqualOrHigher is supported by Prio_Level
# and only Log action is supported
# so set Prio_Level = max severity
if [ "$sev_action" = "0" ] && [ "$sev_compare" = "1" ]; then
if [ -n "$severities" ]; then
max_sev=$(echo $severities | tr ' ' '\n' | sort -n | tail -1)
fi
append_conf " prio_level $max_sev"
fi
fi
fi
append_conf ""
# if severities is none, or
# if kern facility has been excluded
# then we need to stop kernel logs
# sev_action and sev_compare are being checked because we don't want to work with rules that exclude logs
if [ "$severities" = "none" ] || { [ "$kern_facility" -eq 0 ] && [ "$all_facilities" -eq 0 ] && [ "$sev_action" = "0" ] && [ "$sev_compare" = "1" ]; }; then
# block all
# create a filter section that matches on KM* tag
# and excludes all messages
create_filter_section "KM*" "0"
append_conf " exclude message ^.*$"
append_conf ""
fi
fi
}
generate_syslog_filter() {
local param="regex"
[ "$sev_action" = "1" ] && param="exclude"
# start adding the fluent-bit filter section
create_filter_section "SL*" "0"
if [ "$severities" = "none" ]; then
append_conf " exclude pri ^.*$"
if [ "$facility_level" == "24" ]; then
# value 24 means all facility level, which is as good as not
# generating a filter section, so return
return
fi
for fval in $facilities; do
for sval in $severities; do
local pri=$((fval * 8 + sval))
append_conf " $param pri ^${pri}$"
# facility_level is a list value, hence, generate regex for
# each value
IFS=" "
for val in $facility_level; do
# as per rfc 5424 and 3164, pri in syslog msg is
# facility*8+severity. Severity value can range from 0-7 hence
# generate regex for each.
for sval in 0 1 2 3 4 5 6 7; do
pri=`expr $val \* 8 + $sval`
echo " regex pri $pri" >> ${TMP_CONF_FILE}
done
done
append_conf ""
}
generate_pattern_filter() {
local match_regex="$1"
local match_pattern="$2"
generate_severity_regex() {
local sev_level="$1"
local sev_compare="$2"
local sev_action="$3"
[ -z "$match_regex" ] && return
[ -z "$match_pattern" ] && return
local pri=0
local param="exclude"
# start adding the fluent-bit filter section
create_filter_section "$match_regex" "1"
append_conf " regex message $match_pattern"
append_conf ""
if [ "$sev_action" == "0" ]; then
param="regex"
fi
local fval=0
if [ "$sev_compare" == "0" ]; then
# generate regex for all facility values, with severity=sev_level
while [ $fval -le 23 ] ; do
pri=`expr $fval \* 8 + $sev_level`
echo " $param pri $pri" >> ${TMP_CONF_FILE}
fval=$((fval + 1))
done
elif [ "$sev_compare" == "1" ]; then
# generate regex for all severity values greater than or equal to
# sev_level. Note that lower values have higher precedence, so sev_level
# 0 (emergency) has higher precedence than 3 (error)
while [ $fval -le 23 ] ; do
sval=0
while [ $sev_level -ge $sval ]; do
pri=`expr $fval \* 8 + $sval`
echo " $param pri $pri" >> ${TMP_CONF_FILE}
sval=$((sval + 1))
done
fval=$((fval + 1))
done
fi
}
handle_filter_conf() {
local section="$1" # config filter
local filter_name="$2"
local name
# no need to proceed if name of filter section is not one of the values
# listed in option filter in config action section
config_get name $section name
if [ "$name" != "$filter_name" ]; then
return
fi
# as per data model, at a time either facility_level or severity_level can
# be specified along with pattern_match. hence, first process and generate
# regex for pattern_match which is common in both condition. Next, we will
# process facility_level and return if facility level is defined and not
# process severity related params at all.
local pattern_match
config_get pattern_match $section pattern_match
if [ -n "$pattern_match" ]; then
echo " regex $pattern_match" >> ${TMP_CONF_FILE}
fi
local facility_level
config_get facility_level $section facility_level
if [ -n "$facility_level" ]; then
generate_facility_regex $facility_level
# return from here since if facility_level is defined, then no
# need to process severity_level
return
fi
local sev_level
local sev_compare
local sev_action
config_get sev_level $section severity_level
if [ -n "$sev_level" ]; then
# value 1 of severity compare corresponds to data model
# and system default which is EqualorHigher
config_get sev_compare $section severity_compare 1
# value 0 of severity action corresponds to data model
# and system default that is log
config_get sev_action $section severity_action 0
generate_severity_regex $sev_level $sev_compare $sev_action
fi
}
create_filter_section() {
local match="$1"
echo "[FILTER]" >> ${TMP_CONF_FILE}
echo " name grep" >> ${TMP_CONF_FILE}
echo " match $match" >> ${TMP_CONF_FILE}
echo " logical_op or" >> ${TMP_CONF_FILE} # handle multiple filters
}
handle_filter_ref() {
local filter_name="$1"
config_foreach handle_filter_conf filter "$filter_name"
}
handle_log_file() {
local section="$1" # out_file section
local linker="$2"
local match_regex="$3"
local template="$4"
local match="$2"
local action_ref
config_get action_ref $section action
if [ "$action_ref" != "$linker" ]; then
if [ "$action_ref" != "$match" ]; then
return
fi
local enabled
config_get_bool enabled $section enable
if [ "$enabled" = "0" ]; then
config_get enabled $section enable
if [ "$enabled" == 0 ]; then
return
fi
local file
config_get file $section file
if [ -z "$file" ] || [ -z "$match_regex" ]; then
if [ -z "$file" ]; then
return
fi
append_conf "[OUTPUT]"
append_conf " name file"
append_conf " workers 2"
append_conf " match_regex $match_regex"
append_conf " file $file"
if [ -n "$template" ]; then
append_conf " format template"
append_conf " template ${template}"
fi
append_conf ""
echo "[OUTPUT]" >> ${TMP_CONF_FILE}
echo " name file" >> ${TMP_CONF_FILE}
echo " match $match" >> ${TMP_CONF_FILE}
echo " file $file" >> ${TMP_CONF_FILE}
echo " format template" >> ${TMP_CONF_FILE}
echo " template {time} {hostname} {ident}: {message}" >> ${TMP_CONF_FILE}
}
handle_log_remote() {
local section="$1"
local linker="$2"
local match_regex="$3"
local match="$2"
local action_ref
config_get action_ref $section action
if [ "$action_ref" != "$linker" ]; then
if [ "$action_ref" != "$match" ]; then
return
fi
local enabled
config_get_bool enabled $section enable
if [ "$enabled" = "0" ]; then
config_get enabled $section enable
if [ "$enabled" == 0 ]; then
return
fi
@@ -330,167 +228,83 @@ handle_log_remote() {
return
fi
append_conf "[OUTPUT]"
append_conf " name syslog"
append_conf " match_regex $match_regex"
append_conf " host $address"
echo "[OUTPUT]" >> ${TMP_CONF_FILE}
echo " name syslog" >> ${TMP_CONF_FILE}
echo " match $match" >> ${TMP_CONF_FILE}
echo " host $address" >> ${TMP_CONF_FILE}
append_conf " syslog_appname_key ident"
append_conf " syslog_procid_key pid"
append_conf " syslog_message_key message"
append_conf " syslog_hostname_key hostname"
local hostname="$(uci -q get 'system.@system[0].hostname')"
if [ -n "${hostname}" ]; then
append_conf " syslog_hostname_preset ${hostname}"
fi
local proto # holds value tcp or udp
config_get proto ${section} proto
if [ -n "$proto" ]; then
if [ "$proto" == "tls" ]; then
append_conf " mode tcp"
append_conf " tls on"
echo " mode tcp" >> ${TMP_CONF_FILE}
echo " tls on" >> ${TMP_CONF_FILE}
else
append_conf " mode $proto"
echo " mode $proto" >> ${TMP_CONF_FILE}
fi
fi
local port
config_get port $section port
if [ -n "$port" ]; then
append_conf " port $port"
echo " port $port" >> ${TMP_CONF_FILE}
fi
local cert
local peer_verify
config_get cert $section cert
if [ -n "$cert" ]; then
append_conf " tls.crt_file $cert"
echo " tls.crt_file $cert" >> ${TMP_CONF_FILE}
config_get_bool peer_verify $section peer_verify
if [ "$peer_verify" = "1" ]; then
append_conf " tls.verify on"
config_get peer_verify $section peer_verify
if [ "$peer_verify" == "1" ]; then
echo " tls.verify on" >> ${TMP_CONF_FILE}
fi
fi
append_conf ""
}
resolve_source_section() {
local src_section="$1"
local linker="$2"
local src_name syslog_en kernel_en
config_get src_name "$src_section" name
[ "$src_name" = "$linker" ] || return
config_get_bool syslog_en "$src_section" system_messages 1
config_get_bool kernel_en "$src_section" kernel_messages 1
# create an input section using /dev/log or kmsg
# and store the tag in a variable
# so that later a regex can be made to match this tag
# which will be used in output section
if [ "$syslog_en" = "1" ]; then
source_tag_syslog="SL$src_name"
create_input_section "$source_tag_syslog"
fi
if [ "$kernel_en" = "1" ]; then
source_tag_kmsg="KM$src_name"
create_kmsg_input_section "$source_tag_kmsg"
fi
}
# get the value of option expression from the relevant section
resolve_template_section() {
local tmpl_section="$1"
local tmpl_name
config_get tmpl_name "$tmpl_section" name
[ "$tmpl_name" = "$template_ref" ] || return
config_get template_expr "$tmpl_section" expression
[ -n "$template_expr" ] && echo "$template_expr"
}
# loop over template sections and get the value of option expression from the relevant section
get_template_expression() {
local template_ref="$1"
[ -n "$template_ref" ] && config_foreach resolve_template_section template
}
# build a regex that will match all the tags supplied to this function
build_match_regex() {
local tags="$1"
local first=1
local regex="^("
for tag in $tags; do
[ "$first" -eq 1 ] && first=0 || regex="$regex|"
regex="$regex$tag"
done
regex="$regex)\$"
echo "$regex"
}
handle_filter_conf() {
local section="$1" # config filter
local filter_name="$2"
local name
config_get name $section name
[ "$name" = "$filter_name" ] || return
populate_allowed_logs "$filter_name"
}
handle_action() {
local tag_regex filter source_ref template_ref source_sec log_template finst
local action_section="$1"
local source_tag_syslog source_tag_kmsg
local section="$1"
# shared variables set by populate_allowed_logs
match_pattern=""
facilities=""
all_facilities=0
kern_facility=1
severities=""
sev_compare=1
sev_action=0
local filter
config_get filter $section filter
config_get action_name "$action_section" name
config_get filter "$action_section" filter
config_get source_ref "$action_section" source
config_get template_ref "$action_section" template
# use config action option name as tag for input
local tag
config_get tag $section name
if [ -z "$tag" ]; then
return
fi
[ -z "$action_name" ] && return
[ -z "$source_ref" ] && return
# read filter section and populate relevant variables
# these variables will be used by create_kmsg_input_section
# generate_syslog_filter, and generate_pattern_filter functions
create_input_section $tag
if [ -n "$filter" ]; then
# the only fluentbit filter that is useful for the datamodel is
# grep. Also, fluentbit does not seem to handle multiple instances
# of FILTER of the same kind. Hence, each filter section corresponding
# to an action entry in the uci would translate for us into a set of
# regex/exclude values instead of an individual FILTER section per uci
# section. filter is a list, treat accordingly
create_filter_section $tag
IFS=" "
for finst in $filter; do
config_foreach handle_filter_conf filter "$finst"
handle_filter_ref $finst
done
fi
# Resolve referenced source sections
for source_sec in $source_ref; do
config_foreach resolve_source_section source "$source_sec"
done
# build a regex that will match all the sources for this action
tag_regex=$(build_match_regex "$source_tag_syslog $source_tag_kmsg")
if [ -n "$filter" ]; then
generate_pattern_filter "$tag_regex" "$match_pattern"
generate_syslog_filter
fi
# get the template expression if any is present
log_template="$(get_template_expression "$template_ref")"
# handle output, each action can be associated with an out_log and out_syslog
# handle output, each action can be associated with a out_log and out_syslog
# section so figure out if any out_log or out_syslog section is associated
# with this action and set up output accordingly.
config_foreach handle_log_file log_file "$action_name" "$tag_regex" "$log_template"
config_foreach handle_log_remote log_remote "$action_name" "$tag_regex"
config_foreach handle_log_file log_file "$tag"
config_foreach handle_log_remote log_remote "$tag"
}
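For orientation, a sketch of the uci action section that handle_action consumes; the section name and values are hypothetical, only the option names (name, filter, source, template) follow the config_get calls above:
# config action
#     option name     'remote_syslog'
#     option source   'lan'
#     option template 'tmpl1'
#     list   filter   'flt1'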
handle_action_section() {
@@ -506,14 +320,13 @@ logmngr_init() {
create_config_file
create_service_section
create_default_filters
handle_action_section
if [ -f /lib/logmngr/logrotate.sh ]; then
logrotate_init
fi
if [ "$enabled" = "0" ]; then
if [ "$enabled" == "0" ]; then
return
fi
@@ -527,4 +340,9 @@ logmngr_init() {
fi
procd_set_param respawn
procd_close_instance
procd_open_instance klogd
procd_set_param command /usr/libexec/logmngr-klogd
procd_set_param respawn
procd_close_instance
}

View File

@@ -0,0 +1,7 @@
#!/bin/sh
until [ -S /dev/log ]; do
sleep 1
done
exec /sbin/klogd -n

View File

@@ -5,9 +5,9 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=map-agent
PKG_VERSION:=6.3.6.15
PKG_VERSION:=6.3.5.8
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_VERSION:=7b05d265776fca2ff84a63188fcec55c08057e33
PKG_SOURCE_VERSION:=53bd2c211f495029377105a2b76490784fff9538
PKG_MAINTAINER:=Jakob Olsson <jakob.olsson@iopsys.eu>
PKG_LICENSE:=BSD-3-Clause

View File

@@ -17,12 +17,6 @@ for port_bridge_sec in $port_bridge_sec_list; do
fi
done
# Exit if the PORT Bridge Name is empty
[ -z "$port_bridge_name" ] && exit 0
# Exit if the PORT is not member of the AL Bridge
[ "$port_bridge_name" = "$al_bridge" ] || exit 0
# Exit if the device is not operating in extender/repeater mode
al_brnet="${al_bridge:3}"
[ "$(uci -q get network.${al_brnet}.proto)" == "dhcp" ] || exit 0
@@ -36,9 +30,16 @@ fi
################ Dedicated ETH WAN Port ################
wanport="$(jsonfilter -i /etc/board.json -e @.network.wan.device)"
if [ -n "$wanport" ]; then
# Don't require wanport to be part of port bridge
[ "$wanport" = "$PORT" ] || exit 0
########################################################
else
# Exit if the PORT Bridge Name is empty
[ -z "$port_bridge_name" ] && exit 0
# Exit if the PORT is not member of the AL Bridge
[ "$port_bridge_name" = "$al_bridge" ] || exit 0
#################### DHCP Discovery ####################
if [ "$LINK" = "up" ]; then
brctl delif $al_bridge $PORT
@@ -87,6 +88,8 @@ update_bstas() {
wpa_cli -i "$ifname" enable_network $network_id > /dev/null 2>&1
# wpa_cli -i "$ifname" save_config > /dev/null 2>&1
fi
echo "wpa_cli -i $ifname $action id $network_id" > /dev/console
}
if [ "$LINK" = "up" ]; then
@@ -95,8 +98,12 @@ if [ "$LINK" = "up" ]; then
config_foreach remove_from_bridge bsta
config_foreach update_bstas bsta down
brctl addif $al_bridge $PORT
echo "brctl addif $al_bridge $PORT" > /dev/console
/lib/wifi/multiap set_uplink "eth" "$PORT"
else
brctl delif $al_bridge $PORT
echo "brctl delif $al_bridge $PORT" > /dev/console
/lib/wifi/multiap unset_uplink "eth"
#rm -f "$map_bh_file"
config_load "mapagent"
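For context, the dedicated-WAN lookup near the top of this script reads the board description file; a minimal sketch with a hypothetical device name:
# /etc/board.json excerpt (device name hypothetical):
#   { "network": { "wan": { "device": "eth4" } } }
# jsonfilter -i /etc/board.json -e @.network.wan.device   # -> eth4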

View File

@@ -258,6 +258,6 @@ map_genconf () {
config_foreach mapcontroller_remove_mld_id ap
}
fi
ubus -t 5 call uci commit '{"config":"mapcontroller"}'
uci -q commit mapcontroller
fi
}

View File

@@ -6,9 +6,9 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=map-controller
PKG_VERSION:=6.4.2.9
PKG_VERSION:=6.3.0.19
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_VERSION:=c427bbaa53ce470a45a59326281f214f1111c3f7
PKG_SOURCE_VERSION:=1b7df5906c9953e1b2fea793b17ff29b2ccc9445
PKG_MAINTAINER:=Jakob Olsson <jakob.olsson@genexis.eu>
LOCAL_DEV=0
@@ -74,29 +74,12 @@ ifeq ($(CONFIG_CONTROLLER_PROPAGATE_PROBE_REQ),y)
TARGET_CFLAGS += -DPROPAGATE_PROBE_REQ
endif
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include/map-controller
$(INSTALL_DIR) $(1)/usr/include/map-controller/utils
$(CP) $(PKG_BUILD_DIR)/src/wifi_dataelements.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/cntlr_commands_impl.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/cntlr_commands.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/cntlr_apis.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/wifi_opclass.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/steer_module.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/timer.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/timer_impl.h $(1)/usr/include/map-controller
$(CP) $(PKG_BUILD_DIR)/src/utils/debug.h $(1)/usr/include/map-controller/utils
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/src/libcntlr-apis.so $(1)/usr/lib
endef
define Package/map-controller/install
$(INSTALL_DIR) $(1)/etc
$(CP) ./files/* $(1)/
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/src/mapcontroller $(1)/usr/sbin/
$(INSTALL_DIR) $(1)/usr/lib/mapcontroller
$(CP) $(PKG_BUILD_DIR)/src/libcntlr-apis.so $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/src/plugins/steer/rcpi/rcpi.so $(1)/usr/lib/mapcontroller/rcpi.so
endef

View File

@@ -2,13 +2,15 @@ config controller 'controller'
option enabled '1' # may be modified by other package start-up scripts (i.e. map-agent)
option profile '3'
option registrar '2 5 6'
option debug '2'
option debug '0'
option bcn_metrics_max_num '10'
option initial_channel_scan '0'
option enable_ts '0'
option primary_vid '1'
option primary_pcp '0'
option allow_bgdfs '0'
option stale_sta_timeout '30d'
option channel_plan '0'
option de_collect_interval '60'
config sta_steering
@@ -24,10 +26,6 @@ config sta_steering
option plugins_policy 'any'
list plugins 'rcpi'
config channel_plan
option preclear_dfs '0'
option acs '0'
###################
# Default AP sections credentials will by updated
# by uci-defaults script 99-mapcntlr

View File

@@ -71,7 +71,7 @@ validate_ap_section() {
'encryption:or("sae", "sae+aes", "psk2",
"psk2+aes", "sae-mixed", "sae-mixed+aes",
"none", "psk-mixed", "psk-mixed+aes",
"psk", "psk+aes", "wpa", "wpa2", "wpa-mixed")' \
"psk", "psk+aes")' \
'key:string' \
'vid:range(1,65535):1' \
'type:or("backhaul", "fronthaul", "combined")' \

View File

@@ -1,11 +0,0 @@
if (PACKAGE_map-plugins)
menu "Options"
config STEER_RATE_PLUGIN
bool "STA steering based on estimated throughput of target-AP"
default PACKAGE_map-plugins-steer-rate
endmenu
endif

View File

@@ -1,68 +0,0 @@
#
# Copyright (C) 2025 Genexis Sweden AB
#
include $(TOPDIR)/rules.mk
PKG_NAME:=map-plugins
PKG_VERSION:=1.0.32
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=d8b310edad1b9777aed805682394e3f9bb300d81
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/map-plugins.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip
endif
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_LICENSE:=PROPRIETARY GENEXIS
PKG_LICENSE_FILES:=LICENSE
PKG_CONFIG_DEPENDS := CONFIG_PACKAGE_mapcontroller
PKG_BUILD_DEPENDS := map-controller
include $(INCLUDE_DIR)/package.mk
include $(wildcard plugins/*.mk)
MAKE_FLAGS += \
CFLAGS="$(TARGET_CFLAGS) -Wall"
plugins := \
$(if $(CONFIG_PACKAGE_map-plugins-steer-rate),steer-rate) \
$(if $(CONFIG_PACKAGE_map-plugins-bsteer),bsteer)
ppkg:=$(patsubst plugins/%.mk,map-plugins-%,$(wildcard plugins/*.mk))
define Package/map-plugins/Default
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=Multi-AP value added services
endef
define Package/map-plugins/config
source "$(SOURCE)/Config.in"
endef
define Package/map-plugins
$(call Package/map-plugins/Default)
TITLE:=Multi-AP plugins modules
DEPENDS+=+libeasy +libwifiutils +map-controller
endef
define Package/map-plugins/description
Provides extra Multi-AP services viz. steering, channel-planning, self-organizing network etc.
endef
define Package/map-plugins/install
:
endef
define Build/Compile
$(foreach p,$(plugins),$(call Build/Compile/map-plugins-$(p), $(1)))
endef
$(eval $(call BuildPackage,map-plugins))
$(eval $(foreach p,$(ppkg),$(call BuildPackage,$(p))))

View File

@@ -1,20 +0,0 @@
define Package/map-plugins-bsteer
$(call Package/map-plugins/Default)
TITLE:=Wi-Fi backhaul steering plugin based on maximizing backhaul throughput
DEPENDS= +libubox +libuci +libubus +libeasy +libnl-genl \
+libjson-c +libblobmsg-json +map-controller \
+map-plugins
endef
define Package/map-plugins-bsteer/install
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/usr/lib/mapcontroller
$(CP) $(PKG_BUILD_DIR)/steer/bsteer/bsteer.so $(1)/usr/lib/mapcontroller/bsteer.so
endef
define Build/Compile/map-plugins-bsteer
$(MAKE) -C $(PKG_BUILD_DIR)/steer/bsteer \
CC="$(TARGET_CC)" \
CFLAGS="$(TARGET_CFLAGS)" \
LDFLAGS="$(TARGET_LDFLAGS)";
endef

View File

@@ -1,20 +0,0 @@
define Package/map-plugins-steer-rate
$(call Package/map-plugins/Default)
TITLE:=STA steering based on estimated throughput of target-AP
DEPENDS= +libubox +libuci +libubus +libeasy +libnl-genl \
+libjson-c +libblobmsg-json +map-controller \
+map-plugins
endef
define Package/map-plugins-steer-rate/install
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/usr/lib/mapcontroller
$(CP) $(PKG_BUILD_DIR)/steer/rate/rate.so $(1)/usr/lib/mapcontroller/rate.so
endef
define Build/Compile/map-plugins-steer-rate
$(MAKE) -C $(PKG_BUILD_DIR)/steer/rate \
CC="$(TARGET_CC)" \
CFLAGS="$(TARGET_CFLAGS)" \
LDFLAGS="$(TARGET_LDFLAGS)";
endef

View File

@@ -65,69 +65,12 @@ generate_igmp_proxy_config(){
uci add_list mcast.@proxy[-1].filter="239.0.0.0/8"
}
generate_mld_snooping_config(){
local u_itf="$1"
uci add mcast snooping
uci rename mcast.@snooping[-1]="mc_snooping_MLD"
uci set mcast.@snooping[-1].enable="1"
uci set mcast.@snooping[-1].proto="mld"
uci set mcast.@snooping[-1].version="2"
uci set mcast.@snooping[-1].robustness="2"
uci set mcast.@snooping[-1].query_interval="125"
uci set mcast.@snooping[-1].query_response_interval="100"
uci set mcast.@snooping[-1].last_member_query_interval="10"
uci set mcast.@snooping[-1].fast_leave="1"
uci set mcast.@snooping[-1].snooping_mode="2"
uci set mcast.@snooping[-1].interface="$u_itf"
}
generate_igmp_snooping_config(){
local u_itf="$1"
uci add mcast snooping
uci rename mcast.@snooping[-1]="igmp_snooping_1"
uci set mcast.@snooping[-1].enable="1"
uci set mcast.@snooping[-1].proto="igmp"
uci set mcast.@snooping[-1].version="2"
uci set mcast.@snooping[-1].robustness="2"
uci set mcast.@snooping[-1].query_interval="125"
uci set mcast.@snooping[-1].query_response_interval="100"
uci set mcast.@snooping[-1].last_member_query_interval="10"
uci set mcast.@snooping[-1].fast_leave="1"
uci set mcast.@snooping[-1].snooping_mode="2"
uci set mcast.@snooping[-1].interface="$u_itf"
uci add_list mcast.@snooping[-1].filter="239.0.0.0/8"
}
check_wan_bridge() {
local config="$1"
local wan_device="$2"
local name type
[ $((is_wan_bridge)) -ne 0 ] && return
config_get type "$config" type
config_get name "$config" name
[ "$type" = "bridge" -a "$wan_device" = "$name" ] && is_wan_bridge=1
}
generate_mcast_config(){
local up_itf="$(uci -q get network.wan.device)"
local is_wan_bridge=0
config_load network
config_foreach check_wan_bridge device "$up_itf"
up_itf="$(uci -q get network.wan.device)"
if [ $((is_wan_bridge)) -eq 0 ]; then
generate_igmp_proxy_config "$up_itf"
generate_mld_proxy_config "$up_itf"
else
generate_igmp_snooping_config "$up_itf"
generate_mld_snooping_config "$up_itf"
fi
generate_igmp_proxy_config "$up_itf"
generate_mld_proxy_config "$up_itf"
}
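As a rough illustration, on a bridged WAN the snooping branch above leaves uci state along these lines (interface name hypothetical):
# uci show mcast (excerpt, illustrative):
#   mcast.igmp_snooping_1=snooping
#   mcast.igmp_snooping_1.proto='igmp'
#   mcast.igmp_snooping_1.snooping_mode='2'
#   mcast.igmp_snooping_1.interface='br-wan'
#   mcast.mc_snooping_MLD=snooping
#   mcast.mc_snooping_MLD.proto='mld'
#   mcast.mc_snooping_MLD.interface='br-wan'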
interfaces_ok(){

View File

@@ -1,140 +1,2 @@
#!/bin/sh
. /lib/functions.sh
. /usr/share/libubox/jshn.sh
ZONE_NAME_FILE="/tmp/mcast_fw_zone"
log() {
echo "${@}"|logger -t firewall.mcast -p info
}
collect_zone_name() {
local name network
config_get name "${1}" name ""
if [ -z "${name}" ]; then
return
fi
config_get network "${1}" network ""
for i in ${network}; do
var="${i}_zone"
echo "${var}=${name}" >> "${ZONE_NAME_FILE}"
done
}
load_zone_names() {
rm -f "${ZONE_NAME_FILE}"
config_load firewall
config_foreach collect_zone_name zone
}
get_firewall_zone() {
if [ ! -f "${ZONE_NAME_FILE}" ]; then
echo ""
return
fi
var="${1}_zone="
name="$(cat ${ZONE_NAME_FILE} | grep ${var} | head -n 1 | cut -d'=' -f 2)"
echo "${name}"
}
# Get interface name for a device (e.g., br-lan -> lan)
find_interface_for_device() {
local dev="${1}"
local intf=""
local intf_dump idx
if [ -z "${dev}" ]; then
echo ""
return
fi
intf_dump="$(ubus -t 5 call network.interface dump)"
if [ -z "${intf_dump}" ]; then
echo ""
return
fi
json_load "${intf_dump}"
json_select interface
if [ "$?" -ne 0 ]; then
echo ""
return
fi
idx=1
while json_is_a ${idx} object; do
json_select ${idx}
if [ "$?" -ne 0 ]; then
break
fi
json_get_var device device
if [ "${device}" = "${dev}" ]; then
json_get_var intf interface
break
fi
idx=$(( idx + 1 ))
json_select ..
done
echo "${intf}"
}
# Setup iptables rule to allow multicast from upstream to downstream
setup_multicast_rule() {
local upstream_dev="$1"
local downstream_dev="$2"
local upstream_zone downstream_zone
local upstream_iface downstream_iface
upstream_iface=$(find_interface_for_device "$upstream_dev")
downstream_iface=$(find_interface_for_device "$downstream_dev")
[ -z "$upstream_iface" ] || [ -z "$downstream_iface" ] && {
log "Failed to map devices to interfaces"
return
}
upstream_zone=$(get_firewall_zone "$upstream_iface")
downstream_zone=$(get_firewall_zone "$downstream_iface")
[ -z "$upstream_zone" ] || [ -z "$downstream_zone" ] && {
log "Failed to map interfaces to zones"
return
}
iptables -w -t filter -A zone_${upstream_zone}_forward -p udp \
-d 224.0.0.0/240.0.0.0 \
-m comment --comment "!fw3: Allow-Multicast-UDP" \
-j zone_${downstream_zone}_dest_ACCEPT
}
apply_mcast_rule() {
local cfg="$1"
local up down proto
config_get proto "$cfg" proto
[ "$proto" = "igmp" ] || return
config_get up "$cfg" upstream_interface
config_get down "$cfg" downstream_interface
[ -n "$up" ] && [ -n "$down" ] && setup_multicast_rule "$up" "$down"
}
add_multicast_rules() {
config_load mcast
config_foreach apply_mcast_rule proxy
}
load_zone_names
add_multicast_rules
rm -f "${ZONE_NAME_FILE}"
# Forward multicast packets from wan to lan
iptables -w -t filter -A zone_wan_forward -p udp -d 224.0.0.0/240.0.0.0 -m comment --comment "!fw3: Allow-Multicast-UDP" -j zone_lan_dest_ACCEPT

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=netmngr
PKG_VERSION:=1.1.8
PKG_VERSION:=1.1.7
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/netmngr.git
PKG_SOURCE_VERSION:=6310f32b80f8abeccbf99ad55ce88792b19342d6
PKG_SOURCE_VERSION:=ce4add331e7044e5d2d39f8a7adac84a0457680e
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -8,7 +8,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=netmode
PKG_VERSION:=1.1.5
PKG_VERSION:=1.1.4
PKG_RELEASE:=1
PKG_BUILD_DIR := $(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_LICENSE:=GPL-2.0-only

View File

@@ -11,19 +11,23 @@
"supported_args": [
{
"name": "username",
"description": "PPPoE username",
"description": "PPoE username",
"required": true,
"type": "string",
"#value": "TestUser"
},
{
"name": "password",
"description": "PPPoE password",
"description": "PPoE password",
"required": true,
"type": "string",
"#value": "TestPassword"
}
]
},
{
"name": "bridged",
"description": "Bridged mode (Layer 2)"
}
]
}

View File

@@ -80,7 +80,11 @@ l2_network_config() {
json_select ..
json_select wan 2>/dev/null
json_get_var device device
[ -n "$device" ] && uci add_list network.br_lan.ports="$device"
[ -n "$device" ] && {
# dedicated wan port - create uplink device
uci -q set network.l2wan=interface
uci -q set network.l2wan.device="$device"
}
json_cleanup
fi
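With a dedicated WAN port this leaves a plain layer-2 uplink interface in the network config, roughly (device name hypothetical):
# uci show network.l2wan (illustrative):
#   network.l2wan=interface
#   network.l2wan.device='eth4'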

View File

@@ -1,42 +0,0 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=nlohmann-json
PKG_VERSION:=3.11.2
PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/nlohmann/json.git
PKG_SOURCE_VERSION:=v$(PKG_VERSION)
PKG_MIRROR_HASH:=skip
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE.MIT
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
include $(INCLUDE_DIR)/package.mk
define Package/nlohmann-json
SECTION:=libs
CATEGORY:=Libraries
TITLE:=JSON for Modern C++ (nlohmann/json)
endef
define Package/nlohmann-json/description
JSON for Modern C++ is a single-header C++ library for working with JSON.
endef
define Build/Compile
# Header-only, nothing to compile
true
endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include/nlohmann
$(CP) $(PKG_BUILD_DIR)/single_include/nlohmann/json.hpp $(1)/usr/include/nlohmann/
endef
define Package/nlohmann-json/install
true
endef
$(eval $(call BuildPackage,nlohmann-json))

View File

@@ -1,43 +0,0 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=nng
PKG_VERSION:=1.11
PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/nanomsg/nng.git
PKG_SOURCE_VERSION:=v$(PKG_VERSION)
PKG_MIRROR_HASH:=skip
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE.txt
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_INSTALL_DIR:=$(PKG_BUILD_DIR)/ipkg-install
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
define Package/nng
SECTION:=libs
CATEGORY:=Libraries
TITLE:=NNG - nanomsg-next-gen
URL:=https://github.com/nanomsg/nng
DEPENDS:=+libpthread +libatomic
endef
define Package/nng/description
NNG is a lightweight messaging library, a successor to nanomsg.
endef
CMAKE_OPTIONS += \
-DBUILD_SHARED_LIBS=ON \
-DNNG_ENABLE_EXAMPLES=OFF \
-DNNG_ENABLE_TESTS=OFF
define Package/nng/install
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libnng.so* $(1)/usr/lib/
endef
$(eval $(call BuildPackage,nng))

View File

@@ -19,8 +19,13 @@ config OBUSPA_CONTROLLER_MTP_VERIFY
bool "Enable verification of controller MTP before processing the message"
default n
config OBUSPA_LOCAL_MQTT_LISTENER
bool "Configures local mqtt broker for local usp connections"
config OBUSPA_ENABLE_TEST_CONTROLLER
bool "Adds a test controller by default"
default n
select OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL
config OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL
bool "Adds a test controller by default (local access only)"
default n
config OBUSPA_MAX_CONTROLLERS_NUM
@@ -65,13 +70,4 @@ config OBUSPA_VENDOR_PREFIX
config OBUSPA_OVERRIDE_CT_ROLE
bool "Override ControllerTrust role with factory default roles"
default y
config OBUSPA_VC_POLL_PERIOD
int "Polling interval in sec for ValueChange Subscriptions"
range 15 120
default "15"
help
Obuspa relies on a polling mechanism to determine ValueChange, Add and Del Subscriptions.
A short poll period gives more responsive subscription events but can slow the overall experience on slower CPUs with a large number of subscriptions; a larger value results in less responsive subscription handling. This value must be in the range of 15 to 120 secs (default 15).
endif

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=obuspa
PKG_VERSION:=10.0.0.16
PKG_VERSION:=10.0.0.9
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/obuspa.git
PKG_SOURCE_VERSION:=479ffb3582aa245a84829502d9412ca2539eefca
PKG_SOURCE_VERSION:=3e9299063e3c65565d2c834b5ab654fda830f749
PKG_MAINTAINER:=Vivek Dutta <vivek.dutta@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
@@ -33,7 +33,8 @@ define Package/obuspa
TITLE:=USP agent
MENU:=1
DEPENDS:=+libopenssl +libuci +libblobmsg-json +libcurl +libsqlite3 +libubox +libubus +libmosquitto-ssl +libwebsockets-openssl +ca-certificates \
+OBUSPA_LOCAL_MQTT_LISTENER:mosquitto-ssl +libjson-c
+OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL:mosquitto-ssl +OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL:mosquitto-client-ssl \
+OBUSPA_ENABLE_TEST_CONTROLLER:mosquitto-auth-shadow +libjson-c
DEPENDS+=+libbbfdm-api +libbbfdm-ubus +dm-service
endef
@@ -106,10 +107,6 @@ ifneq ($(CONFIG_OBUSPA_MAX_CONTROLLERS_NUM),)
TARGET_CFLAGS += -DOBUSPA_MAX_CONTROLLERS_NUM=$(CONFIG_OBUSPA_MAX_CONTROLLERS_NUM)
endif
ifneq ($(CONFIG_OBUSPA_VC_POLL_PERIOD),)
TARGET_CFLAGS += -DOBUSPA_VC_POLL_PERIOD=$(CONFIG_OBUSPA_VC_POLL_PERIOD)
endif
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ~/git/obuspa/* $(PKG_BUILD_DIR)/
@@ -131,23 +128,27 @@ define Package/obuspa/install
$(INSTALL_DATA) ./files/etc/users/roles/*.json $(1)/etc/users/roles/
$(INSTALL_DATA) ./files/etc/obuspa/usp_utils.sh $(1)/etc/obuspa/
echo "$(VENDOR_PREFIX)" > $(1)/etc/obuspa/vendor_prefix
$(INSTALL_DATA) ./files/etc/uci-defaults/01-fix-upgrade-uci $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/60-generate-ctrust-defaults $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/obuspa-set-dhcp-option $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/92-obuspa_firewall $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/93-obuspa_mdns_adv $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/94-obuspa_set_credential $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/01-fix-upgrade-uci $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/60-generate-ctrust-defaults $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/obuspa-set-dhcp-option $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/92-obuspa_firewall $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/93-obuspa_mdns_adv $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/94-obuspa_set_credential $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/firewall.usp $(1)/etc/
$(INSTALL_BIN) ./files/etc/udhcpc.user.d/udhcpc_obuspa_opt125.user $(1)/etc/udhcpc.user.d/
$(INSTALL_BIN) ./files/etc/udhcpc.user.d/udhcpc_obuspa_opt125.user $(1)/etc/udhcpc.user.d/udhcpc_obuspa_opt125.user
ifeq ($(CONFIG_OBUSPA_CWMP_DATAMODEL_SUPPORT),y)
$(BBFDM_REGISTER_SERVICES) ./bbfdm_service.json $(1) $(PKG_NAME)
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/libuspagentdm.so $(1) $(PKG_NAME)
endif
ifeq ($(CONFIG_OBUSPA_LOCAL_MQTT_LISTENER),y)
$(INSTALL_DATA) ./files/etc/uci-defaults/55-obuspa-local-mqtt-usp-connection $(1)/etc/uci-defaults/
ifeq ($(CONFIG_OBUSPA_ENABLE_TEST_CONTROLLER),y)
$(INSTALL_BIN) ./files/etc/uci-defaults/54-test-usp-remote $(1)/etc/uci-defaults/
endif
ifeq ($(CONFIG_OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL),y)
$(INSTALL_BIN) ./files/etc/init.d/usptest $(1)/etc/init.d/
$(INSTALL_BIN) ./files/etc/uci-defaults/55-test-usp-controller $(1)/etc/uci-defaults/
endif
ifeq ($(CONFIG_OBUSPA_OVERRIDE_CT_ROLE),y)
$(INSTALL_DATA) ./files/etc/uci-defaults/61-override-ct-roles $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/61-override-ct-roles $(1)/etc/uci-defaults/
endif
endef

View File

@@ -10,4 +10,4 @@ config obuspa 'global'
#option client_cert '/etc/obuspa/client.pem'
option log_dest 'syslog'
option dm_caching_exclude '/etc/obuspa/transient_dm.json'
option dualstack_pref 'IPv6'

View File

@@ -278,11 +278,6 @@ update_reset_reason()
fi
}
update_dual_stack_pref()
{
db_set Internal.DualStackPreference "${1}"
}
get_role_index()
{
local name drole
@@ -994,8 +989,6 @@ db_init()
#log "Create reset file ...."
config_load $CONFIGURATION
config_get dualstack_pref global dualstack_pref "IPv6"
global_init
config_foreach configure_localagent localagent
global_init
@@ -1010,10 +1003,8 @@ db_init()
config_foreach configure_subscription subscription
global_init
config_foreach configure_challenges challenge
global_init
update_reset_reason
update_dual_stack_pref "${dualstack_pref}"
uci_commit ${CONFIGURATION}
cp /etc/config/obuspa /tmp/obuspa/

obuspa/files/etc/init.d/usptest Executable file
View File

@@ -0,0 +1,75 @@
#!/bin/sh /etc/rc.common
START=99
STOP=01
USE_PROCD=1
log()
{
echo "$*"|logger -t usptest -p debug
}
get_oui_from_db()
{
db -q get device.deviceinfo.ManufacturerOUI
}
get_serial_from_db()
{
db -q get device.deviceinfo.SerialNumber
}
publish_endpoint()
{
local AgentEndpointID serial oui user pass
if ! uci -q get obuspa.testmqtt; then
return 0;
fi
# return if mosquitto_pub is not present
if [ ! "$(command -v mosquitto_pub)" ]; then
log "mosquitto_pub not present can't publish EndpointID"
return 0;
fi
sleep 2
# Get endpoint id from obuspa config first
config_load obuspa
config_get AgentEndpointID localagent EndpointID ""
if [ -z "${AgentEndpointID}" ]; then
serial=$(get_serial_from_db)
oui=$(get_oui_from_db)
AgentEndpointID="os::${oui}-${serial//+/%2B}"
fi
config_get user testmqtt Username ""
config_get pass testmqtt Password ""
# publish the Agent's EndpointID on the mosquitto broker for discovery by usp-js
# This is a workaround until obuspa adds support for mDNS discovery
if [ -n "${user}" ] && [ -n "${pass}" ]; then
log "Publishing EndpointID ${AgentEndpointID} to local mqtt broker with username, password"
mosquitto_pub -r -t "obuspa/EndpointID" -m "${AgentEndpointID}" -u "${user}" -P "${pass}"
elif [ -n "${user}" ]; then
log "Publishing EndpointID ${AgentEndpointID} to local mqtt broker with username only"
mosquitto_pub -r -t "obuspa/EndpointID" -m "${AgentEndpointID}" -u "${user}"
else
log "Publishing EndpointID ${AgentEndpointID} to local mqtt broker"
mosquitto_pub -r -t "obuspa/EndpointID" -m "${AgentEndpointID}"
fi
}
start_service() {
procd_open_instance usptest
publish_endpoint
procd_close_instance
}
reload_service() {
publish_endpoint
}
service_triggers() {
procd_add_reload_trigger "mosquitto" "obuspa"
}
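Because the EndpointID is published as a retained message, a test client can read it back with a single subscribe; a minimal sketch, run on the device against the default listener (add -h/-p/-u/-P as needed for a remote or authenticated listener):
# read the retained EndpointID once, then exit
mosquitto_sub -t "obuspa/EndpointID" -C 1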

View File

@@ -0,0 +1,20 @@
#!/bin/sh
. /lib/functions.sh
if [ ! -f "/etc/config/mosquitto" ]; then
echo "Local mosquitto broker not available"
return 0
fi
add_usp_test()
{
uci_add mosquitto listener usptest
uci_set mosquitto usptest enabled 1
uci_set mosquitto usptest port '9004'
uci_set mosquitto usptest protocol 'websockets'
uci_set mosquitto usptest auth_plugin '/usr/lib/mosquitto_auth_shadow.so'
}
# Install test MQTT over WS listener
add_usp_test

View File

@@ -1,21 +0,0 @@
#!/bin/sh
. /lib/functions.sh
if [ ! -f "/etc/config/mosquitto" ]; then
echo "Local mosquitto broker not available"
return 0
fi
add_mqtt_obuspa_listener()
{
uci_add mosquitto listener obuspa
uci_set mosquitto obuspa enabled 1
uci_set mosquitto obuspa port '1883'
uci_set mosquitto obuspa no_remote_access '1'
uci_set mosquitto obuspa allow_anonymous '1'
}
# Add mosquitto listener for obuspa connection
# apps/controller should add controller definitions separately
add_mqtt_obuspa_listener

View File

@@ -0,0 +1,57 @@
#!/bin/sh
. /lib/functions.sh
if [ ! -f "/etc/config/obuspa" ]; then
echo "Local obuspa not available"
return 0
fi
if [ ! -f "/etc/config/mosquitto" ]; then
echo "Local mosquitto broker not available"
return 0
fi
add_obuspa_test_mtp()
{
uci_add obuspa mtp test_mtp
uci_set obuspa test_mtp Protocol 'MQTT'
uci_set obuspa test_mtp ResponseTopicConfigured '/usp/endpoint'
uci_set obuspa test_mtp mqtt 'testmqtt'
}
add_obuspa_test_mqtt()
{
# Adds Device.MQTT.Client.
uci_add obuspa mqtt testmqtt
uci_set obuspa testmqtt BrokerAddress '127.0.0.1'
uci_set obuspa testmqtt BrokerPort '1883'
uci_set obuspa testmqtt TransportProtocol 'TCP/IP'
}
add_obuspa_test_controller()
{
# Adds Device.LocalAgent.Controller.
uci_add obuspa controller testcontroller
uci_set obuspa testcontroller EndpointID 'proto::interop-usp-controller'
uci_set obuspa testcontroller Protocol 'MQTT'
uci_set obuspa testcontroller Topic '/usp/controller'
uci_set obuspa testcontroller mqtt 'testmqtt'
uci_set obuspa testcontroller assigned_role_name 'full_access'
}
add_obuspa_config()
{
uci_add mosquitto listener obuspa
uci_set mosquitto obuspa enabled 1
uci_set mosquitto obuspa port '1883'
uci_set mosquitto obuspa no_remote_access '1'
uci_set mosquitto obuspa allow_anonymous '1'
}
# Install test usp controller config
add_obuspa_config
add_obuspa_test_mtp
add_obuspa_test_mqtt
add_obuspa_test_controller

View File

@@ -35,16 +35,3 @@ Index: obuspa-10.0.0.2/src/core/data_model.c
return USP_ERR_OBJECT_DOES_NOT_EXIST;
}
diff --git a/src/core/mqtt.c b/src/core/mqtt.c
index 388697a..444b4da 100644
--- a/src/core/mqtt.c
+++ b/src/core/mqtt.c
@@ -4020,7 +4020,7 @@ void MessageV5Callback(struct mosquitto *mosq, void *userdata, const struct mosq
if (mosquitto_property_read_string(props, RESPONSE_TOPIC,
&response_info_ptr, false) == NULL)
{
- USP_LOG_Warning("%s: No response topic in received MESSAGE frame", __FUNCTION__);
+ USP_LOG_Info("%s: No response topic in received MESSAGE frame", __FUNCTION__);
}
// Send the message to the data model thread for further processing

View File

@@ -1,401 +0,0 @@
diff --git a/src/core/mqtt.c b/src/core/mqtt.c
index 70978501b1..96119fe080 100644
--- a/src/core/mqtt.c
+++ b/src/core/mqtt.c
@@ -53,6 +53,7 @@
#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/x509v3.h>
+#include <netdb.h>
#include <mosquitto.h>
#include "mqtt.h"
@@ -201,8 +202,9 @@ int EnableMosquitto(mqtt_client_t *client);
void SetupCallbacks(mqtt_client_t *client);
void QueueUspConnectRecord_MQTT(mqtt_client_t *client, mtp_send_item_t *msi, char *controller_topic, time_t expiry_time);
int SendQueueHead(mqtt_client_t *client);
+bool check_connectivity(struct addrinfo *list_addr, unsigned int port, sa_family_t family, char *dst_ip, int size);
void Connect(mqtt_client_t *client);
-int PerformMqttClientConnect(mqtt_client_t *client);
+int PerformMqttClientConnect(mqtt_client_t *client, const char *host_ip);
int ConnectSetEncryption(mqtt_client_t *client);
void ConnectCallback(struct mosquitto *mosq, void *userdata, int result);
void ConnectV5Callback(struct mosquitto *mosq, void *userdata, int result, int flags, const mosquitto_property *props);
@@ -245,7 +247,7 @@ void HandleMqttReconnect(mqtt_client_t *client);
void HandleMqttReconnectAfterDisconnect(mqtt_client_t *client);
void HandleMqttDisconnect(mqtt_client_t *client);
void DisconnectIfAllSubscriptionsFailed(mqtt_client_t *client);
-bool IsMqttBrokerUp(mqtt_client_t *client);
+bool IsMqttBrokerUp(mqtt_client_t *client, char *dst_ip, int size);
void RemoveMqttQueueItem(mqtt_client_t *client, mqtt_send_item_t *queued_msg);
void RemoveExpiredMqttMessages(mqtt_client_t *client);
void ParseSubscribeTopicsFromConnack(mqtt_client_t *client, mosquitto_property *prop);
@@ -2350,6 +2352,143 @@ int SendQueueHead(mqtt_client_t *client)
return err;
}
+bool check_connectivity(struct addrinfo *list_addr, unsigned int port, sa_family_t family, char *dst_ip, int size)
+{
+ int err;
+ struct sockaddr_storage saddr;
+ socklen_t saddr_len;
+ sa_family_t sock_family;
+ int socket_fd = INVALID;
+ fd_set writefds;
+ struct timeval timeout;
+ int num_sockets;
+ int so_err;
+ socklen_t so_len = sizeof(so_err);
+ struct addrinfo *iterator;
+ struct sockaddr_in *a;
+ struct sockaddr_in6 *a6;
+ nu_ipaddr_t dst;
+
+ if (list_addr == NULL || dst_ip == NULL)
+ {
+ return false;
+ }
+
+ for (iterator=list_addr; iterator!=NULL; iterator=iterator->ai_next)
+ {
+ if (iterator->ai_family != family)
+ {
+ continue;
+ }
+
+ if (family == AF_INET)
+ {
+ a = (struct sockaddr_in *) iterator->ai_addr;
+ err = nu_ipaddr_from_inaddr(&a->sin_addr, &dst);
+
+ if (err != USP_ERR_OK)
+ {
+ USP_LOG_Error("%s: nu_ipaddr_from_inaddr() failed: %s", __FUNCTION__, strerror(err));
+ continue;
+ }
+ }
+ else
+ {
+ a6 = (struct sockaddr_in6 *) iterator->ai_addr;
+ err = nu_ipaddr_from_in6addr(&a6->sin6_addr, &dst);
+
+ if (err != USP_ERR_OK)
+ {
+ USP_LOG_Error("%s: nu_ipaddr_from_in6addr() failed: %s", __FUNCTION__, strerror(err));
+ continue;
+ }
+ }
+
+ // Next IP if unable to make a socket address structure to probe the MQTT server
+ err = nu_ipaddr_to_sockaddr(&dst, port, &saddr, &saddr_len);
+ if (err != USP_ERR_OK)
+ {
+ continue;
+ }
+
+ // Next IP if unable to determine which address family to use to contact the MQTT broker
+ err = nu_ipaddr_get_family(&dst, &sock_family);
+ if (err != USP_ERR_OK)
+ {
+ continue;
+ }
+
+ // Return if unable to create the socket
+ socket_fd = socket(sock_family, SOCK_STREAM, 0);
+ if (socket_fd == -1)
+ {
+ USP_LOG_Error("%s: failed to open socket", __FUNCTION__);
+ return false;
+ }
+
+ // Return if unable to set the socket as non blocking
+ // We do this before connecting so that we can timeout on connect taking too long
+ err = fcntl(socket_fd, F_SETFL, O_NONBLOCK);
+ if (err == -1)
+ {
+ USP_ERR_ERRNO("fcntl", errno);
+ close(socket_fd);
+ return false;
+ }
+
+ // Next IP if unable to connect to the MQTT Broker
+ // NOTE: The connect is performed in non-blocking mode
+ err = connect(socket_fd, (struct sockaddr *) &saddr, saddr_len);
+ if ((err == -1) && (errno != EINPROGRESS))
+ {
+ USP_ERR_ERRNO("connect", errno);
+ close(socket_fd);
+ continue;
+ }
+
+ // Set up arguments for the select() call
+ FD_ZERO(&writefds);
+ FD_SET(socket_fd, &writefds);
+ timeout.tv_sec = MQTT_CONNECT_TIMEOUT;
+ timeout.tv_usec = 0;
+
+ // Next IP if the connect timed out
+ num_sockets = select(socket_fd + 1, NULL, &writefds, NULL, &timeout);
+ if (num_sockets == 0)
+ {
+ USP_LOG_Error("%s: connect timed out", __FUNCTION__);
+ close(socket_fd);
+ continue;
+ }
+
+ // Next IP if unable to determine whether the connect was successful or not
+ err = getsockopt(socket_fd, SOL_SOCKET, SO_ERROR, &so_err, &so_len);
+ if (err == -1)
+ {
+ USP_ERR_ERRNO("getsockopt", errno);
+ close(socket_fd);
+ continue;
+ }
+
+ // Next IP if connect was not successful
+ if (so_err != 0)
+ {
+ USP_LOG_Error("%s: async connect failed", __FUNCTION__);
+ close(socket_fd);
+ continue;
+ }
+
+ close(socket_fd);
+
+ // Connect was successful
+ nu_ipaddr_str(&dst, dst_ip, size);
+
+ return true;
+ }
+
+ return false;
+}
+
/*********************************************************************//**
**
** IsMqttBrokerUp
@@ -2364,109 +2503,92 @@ int SendQueueHead(mqtt_client_t *client)
** \return true if the MQTT Broker is up, false otherwise
**
**************************************************************************/
-bool IsMqttBrokerUp(mqtt_client_t *client)
+bool IsMqttBrokerUp(mqtt_client_t *client, char *dst_ip, int size)
{
int err;
bool prefer_ipv6;
- nu_ipaddr_t dst;
- struct sockaddr_storage saddr;
- socklen_t saddr_len;
- sa_family_t family;
- int socket_fd = INVALID;
- fd_set writefds;
- struct timeval timeout;
- int num_sockets;
- int so_err;
- socklen_t so_len = sizeof(so_err);
+ bool ipv4_supported = false;
+ bool ipv6_supported = false;
bool result = false;
+ struct addrinfo *addr_list = NULL;
+ struct addrinfo hints;
// Get the preference for IPv4 or IPv6, if dual stack
prefer_ipv6 = DEVICE_LOCAL_AGENT_GetDualStackPreference();
- // Exit if unable to determine the IP address of the MQTT Broker
- err = tw_ulib_diags_lookup_host(client->conn_params.host, AF_UNSPEC, prefer_ipv6, NULL, &dst);
- if (err != USP_ERR_OK)
- {
- goto exit;
- }
+ USP_LOG_Debug("%s: dual-stack preference is: %s", __FUNCTION__, prefer_ipv6 ? "IPv6" : "IPv4");
- // Exit if unable to make a socket address structure to probe the MQTT server
- err = nu_ipaddr_to_sockaddr(&dst, client->conn_params.port, &saddr, &saddr_len);
- if (err != USP_ERR_OK)
- {
- goto exit;
- }
+ // Initialise the hints to use, when obtaining the IP address of the specified host using getaddrinfo()
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_flags |= AI_ADDRCONFIG;
- // Exit if unable to determine which address family to use to contact the MQTT broker
- // NOTE: This shouldn't fail if tw_ulib_diags_lookup_host() is correct
- err = nu_ipaddr_get_family(&dst, &family);
- if (err != USP_ERR_OK)
+ err = getaddrinfo(client->conn_params.host, NULL, &hints, &addr_list);
+ if (err != USP_ERR_OK || addr_list == NULL)
{
- goto exit;
+ USP_ERR_SetMessage("%s: getaddrinfo() failed to resolve: %s", __FUNCTION__, client->conn_params.host);
+ return result;
}
- // Exit if unable to create the socket
- socket_fd = socket(family, SOCK_STREAM, 0);
- if (socket_fd == -1)
+ // Exit if unable to determine which address families are supported by the device
+ // NOTE: In theory, setting getaddrinfo hints to AI_ADDRCONFIG, should filter by supported address family
+ // However, unfortunately that flag does not take into account whether the address is globally routable (for IPv6) as well
+ err = nu_ipaddr_get_ip_supported_families(&ipv4_supported, &ipv6_supported);
+ if (err != USP_ERR_OK)
{
- goto exit;
+ USP_LOG_Error("%s: Failed to determine device IP families", __FUNCTION__);
+ return result;
}
- // Exit if unable to set the socket as non blocking
- // We do this before connecting so that we can timeout on connect taking too long
- err = fcntl(socket_fd, F_SETFL, O_NONBLOCK);
- if (err == -1)
+ if (ipv4_supported == false && ipv6_supported == false)
{
- USP_ERR_ERRNO("fcntl", errno);
- goto exit;
+ // couldn't determine device ip mode
+ USP_LOG_Error("%s: Could not determine device IP families", __FUNCTION__);
+ return result;
}
- // Exit if unable to connect to the MQTT Broker
- // NOTE: The connect is performed in non-blocking mode
- err = connect(socket_fd, (struct sockaddr *) &saddr, saddr_len);
- if ((err == -1) && (errno != EINPROGRESS))
+ if (ipv6_supported == false)
{
- USP_ERR_ERRNO("connect", errno);
- goto exit;
+ // device has only ipv4 address so only try with ipv4 addresses
+ USP_LOG_Debug("%s: Check connectivity with IPv4 only", __FUNCTION__);
+ result = check_connectivity(addr_list, client->conn_params.port, AF_INET, dst_ip, size);
}
-
- // Set up arguments for the select() call
- FD_ZERO(&writefds);
- FD_SET(socket_fd, &writefds);
- timeout.tv_sec = MQTT_CONNECT_TIMEOUT;
- timeout.tv_usec = 0;
-
- // Exit if the connect timed out
- num_sockets = select(socket_fd + 1, NULL, &writefds, NULL, &timeout);
- if (num_sockets == 0)
+ else if (ipv4_supported == false)
{
- USP_LOG_Error("%s: connect timed out", __FUNCTION__);
- goto exit;
+ // device has only ipv6 address so only try with ipv6 addresses
+ USP_LOG_Debug("%s: Check connectivity with IPv6 only", __FUNCTION__);
+ result = check_connectivity(addr_list, client->conn_params.port, AF_INET6, dst_ip, size);
}
-
- // Exit if unable to determine whether the connect was successful or not
- err = getsockopt(socket_fd, SOL_SOCKET, SO_ERROR, &so_err, &so_len);
- if (err == -1)
+ else
{
- USP_ERR_ERRNO("getsockopt", errno);
- goto exit;
- }
+ // device has both ipv6 and ipv4 address, so we need to search as per preference
+ if (prefer_ipv6 == true)
+ {
- // Exit if connect was not successful
- if (so_err != 0)
- {
- USP_LOG_Error("%s: async connect failed", __FUNCTION__);
- goto exit;
+ // First try with v6 then fallback to v4
+ USP_LOG_Debug("%s: Check connectivity with IPv6 first", __FUNCTION__);
+ result = check_connectivity(addr_list, client->conn_params.port, AF_INET6, dst_ip, size);
+ if (result == false)
+ {
+ USP_LOG_Debug("%s: Check connectivity fallback with IPv4", __FUNCTION__);
+ result = check_connectivity(addr_list, client->conn_params.port, AF_INET, dst_ip, size);
+ }
+ }
+ else
+ {
+ // First try with v4 then fallback to v6
+ USP_LOG_Debug("%s: Check connectivity with IPv4 first", __FUNCTION__);
+ result = check_connectivity(addr_list, client->conn_params.port, AF_INET, dst_ip, size);
+ if (result == false)
+ {
+ USP_LOG_Debug("%s: Check connectivity fallback with IPv6", __FUNCTION__);
+ result = check_connectivity(addr_list, client->conn_params.port, AF_INET6, dst_ip, size);
+ }
+ }
}
- // Connect was successful
- result = true;
-
-exit:
- // Tidy up
- if (socket_fd != INVALID)
- {
- close(socket_fd);
+ if (addr_list != NULL) {
+ (void) freeaddrinfo(addr_list);
}
return result;
@@ -2487,18 +2609,20 @@ void Connect(mqtt_client_t *client)
{
int err = USP_ERR_OK;
bool is_up;
+ char dst_ip[INET6_ADDRSTRLEN] = {0};
// Exit if after probing the MQTT Broker, it is not up
// In this case, we do not attempt to call libmosquitto's connect function, as that function will end up blocking for 2 minutes (since the MQTT broker is down)
- is_up = IsMqttBrokerUp(client);
- if (is_up == false)
+ is_up = IsMqttBrokerUp(client, dst_ip, sizeof(dst_ip));
+ if (is_up == false || strlen(dst_ip) == 0)
{
+ USP_LOG_Debug("%s: MQTT broker(%s) is not UP", __FUNCTION__, client->conn_params.host);
err = USP_ERR_INTERNAL_ERROR;
goto exit;
}
// Start the MQTT Connect
- err = PerformMqttClientConnect(client);
+ err = PerformMqttClientConnect(client, dst_ip);
// Exit if failed to connect
if (err != USP_ERR_OK)
@@ -2531,7 +2655,7 @@ exit:
** \return USP_ERR_INTERNAL_ERROR if failed to connect (and should retry)
**
**************************************************************************/
-int PerformMqttClientConnect(mqtt_client_t *client)
+int PerformMqttClientConnect(mqtt_client_t *client, const char *host_ip)
{
int version;
mosquitto_property *proplist = NULL;
@@ -2601,19 +2725,19 @@ int PerformMqttClientConnect(mqtt_client_t *client)
// We do this to prevent the data model thread from potentially being blocked, whilst the connect call is taking place
OS_UTILS_UnlockMutex(&mqtt_access_mutex);
- USP_LOG_Info("Sending CONNECT frame to (host=%s, port=%d)", client->conn_params.host, client->conn_params.port);
+ USP_LOG_Info("Sending CONNECT frame to (host=%s, ip=%s, port=%d)", client->conn_params.host, host_ip, client->conn_params.port);
FRAME_TRACE_DUMP(client);
// Perform the TCP connect
// NOTE: TCP connect can block for around 2 minutes if the broker does not respond to the TCP handshake
if (version == kMqttProtocol_5_0)
{
- mosq_err = mosquitto_connect_bind_v5(client->mosq, client->conn_params.host, client->conn_params.port,
+ mosq_err = mosquitto_connect_bind_v5(client->mosq, host_ip, client->conn_params.port,
keep_alive, NULL, proplist);
}
else
{
- mosq_err = mosquitto_connect(client->mosq, client->conn_params.host, client->conn_params.port,
+ mosq_err = mosquitto_connect(client->mosq, host_ip, client->conn_params.port,
keep_alive);
}

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=parental-control
PKG_VERSION:=1.3.1
PKG_VERSION:=1.2.0
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/parental-control.git
PKG_SOURCE_VERSION:=b1e5b3f81f08271bdaf9cb4bda8a7696a27be3c6
PKG_SOURCE_VERSION:=5d931642e1d46b8cfe24e37054ffd2885e354c2c
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -81,20 +81,19 @@ define Package/parental-control/install
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_DATA) ./files/etc/uci-defaults/95-firewall_parentalcontrol.ucidefaults $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/35-migrate_urlfilter.ucidefaults $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/95-migrate_urlfilter.ucidefaults $(1)/etc/uci-defaults/
$(INSTALL_DIR) $(1)/lib/upgrade/keep.d
$(INSTALL_DATA) ./files/lib/upgrade/keep.d/parentalcontrol $(1)/lib/upgrade/keep.d/parentalcontrol
$(BBFDM_REGISTER_SERVICES) -v ${VENDOR_PREFIX} ./bbfdm_service.json $(1) parentalcontrol
$(INSTALL_DATA) ./files/etc/uci-defaults/40-parental_control_update_bundle_path $(1)/etc/uci-defaults/
ifeq ($(CONFIG_PARENTAL_CONTROL_URLFILTERING),y)
$(INSTALL_DATA) ./files/etc/uci-defaults/50-parental_control_add_bundles $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/55-add-default-bundles $(1)/etc/uci-defaults/
$(CP) ./files/urlbundle_override.json $(1)/etc/parentalcontrol/
else
$(BBFDM_INSTALL_MS_PLUGIN) -v ${VENDOR_PREFIX} ./files/urlbundle_override.json $(1) parentalcontrol
$(INSTALL_DATA) ./files/etc/uci-defaults/50-parental_control_disable_urlfilter $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/etc/uci-defaults/50-parental_control_urlfilter $(1)/etc/uci-defaults/
endif
endef

View File

@@ -1,3 +1,4 @@
config globals 'globals'
option enable '1'
option enable '0'
option loglevel '3'
option urlfilter '1'

View File

@@ -5,16 +5,19 @@
enabled="$(uci -q get parentalcontrol.globals.enable)"
urlfilter="$(uci -q get parentalcontrol.globals.urlfilter)"
# if parentalcontrol is enabled, add the rules, else remove them
if [ "${enabled}" -eq "1" ]; then
# this is for internet_access and profile_bedtime_schedule sections
add_internet_schedule_rules
# this is for urlfilter daemon
add_iptables_nfqueue_rules
if [ "${urlfilter}" -eq "1" ]; then
add_iptables_nfqueue_rules
# this for internet_access and profile_bedtime_schedule sections
add_internet_schedule_rules
fi
else
# remove internet_access and profile_bedtime_schedule rules
remove_internet_schedule_rules
# remove urlfilter daemon rules
remove_iptables_nfqueue_rules
if [ "${urlfilter}" -eq "1" ]; then
# remove internet_access and profile_bedtime_schedule rules
remove_internet_schedule_rules
fi
fi

View File

@@ -38,17 +38,15 @@ configure_fw_rules() {
fi
if [ "${urlfilter}" -eq "1" ]; then
if [ ! -f "${OVERRIDE_JSON}" ]; then
if [ ! -f "${DM_PLUGIN_PATH}" ]; then
# throw error
log "ERROR: urlfiltering disabled at compile time but enabled in config"
else
# Now flush the existing connections, otherwise,
# URL filtering cannot be performed on already open sites.
if which hw_nat > /dev/null 2>&1; then
hw_nat -! > /dev/null 2>&1
fi
if which conntrack > /dev/null 2>&1; then
conntrack -F > /dev/null 2>&1
if [ -n "$(which conntrack)" ]; then
sleep 5
conntrack -F
fi
# this is for urlfilter daemon
@@ -85,13 +83,14 @@ start_service() {
config_load parentalcontrol
validate_global_section
[ -n "${bundle_path}" ] && mkdir -p ${bundle_path}
# add default bundles
process_default_bundles
# add firewall rules
configure_fw_rules
if [ "${urlfilter}" -eq "1" ]; then
# add default bundles
[ -n "${bundle_path}" ] && mkdir -p ${bundle_path}
process_default_bundles
enable_urlfilter_dm
else
disable_urlfilter_dm
@@ -101,7 +100,7 @@ start_service() {
# then /tmp/dhcp.leases will be empty until clients try to get a lease,
# in that case, hostnames will not be processed by the daemon,
# for this we copy /tmp/dhcp.leases to /etc/parentalcontrol/dhcp.leases
# which will be persistent across reboots and upgrade (with keep settings)
# which will be persistent acrros reboots and upgrade where settings are kept
# and will be used as a backup in case /tmp/dhcp.leases is empty
copy_dhcp_leases
@@ -120,7 +119,7 @@ stop_service() {
}
reload_service() {
ret=$(ubus call service list '{"name":"parentalcontrol"}' | jsonfilter -qe '@.parentalcontrol.instances.parentalcontrol.running')
ret=$(ubus call service list '{"name":"parentalcontrol"}' | jsonfilter -qe '@.parentalcontrol.instances.parentalcontrol_dm.running')
if [ "$ret" != "true" ]; then
stop
start

View File

@@ -1,38 +0,0 @@
#!/bin/sh
[ ! -f "/etc/config/parentalcontrol" ] && exit 0
APPS_DIR="/apps"
check_mounted_app_partition() {
local free
if [ ! -d "${APPS_DIR}" ]; then
return 1
fi
# Check free space in disk
free="$(df -P "${APPS_DIR}"|tail -n 1|awk '{print $4}')"
# disable if free storage is less than 300M
if [ "${free}" -lt 307200 ]; then
return 1
fi
return 0
}
if check_mounted_app_partition; then
uci -q set parentalcontrol.globals.bundle_path="${APPS_DIR}/parentalcontrol"
# configure the urlfilter if not configured
urlfilter="$(uci -q get parentalcontrol.globals.urlfilter)"
if [ -z "${urlfilter}" ]; then
uci -q set parentalcontrol.globals.urlfilter='1'
fi
else
uci -q set parentalcontrol.globals.urlfilter='0'
fi
exit 0
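For reference, the free-space check parses the fourth column of df -P output (available space in 1K blocks), so the 307200 threshold corresponds to 300 MB; an illustrative run:
# df -P /apps                                  (output illustrative)
#   Filesystem     1024-blocks   Used Available Capacity Mounted on
#   /dev/mmcblk0p5      1048576 204800    843776      20% /apps
# df -P /apps | tail -n 1 | awk '{print $4}'   # -> 843776 (>= 307200, check passes)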

View File

@@ -1,43 +0,0 @@
#!/bin/sh
[ ! -f "/etc/config/parentalcontrol" ] && exit 0
COUNT=1
add_urlbundle()
{
local name url
url="${1}"; shift
name="$*"
uci -q set parentalcontrol.urlbundle_${COUNT}=urlbundle
uci -q set parentalcontrol.urlbundle_${COUNT}.name="${name}"
uci -q set parentalcontrol.urlbundle_${COUNT}.download_url="${url}"
COUNT="$((COUNT+1))"
}
urlfilter="$(uci -q get parentalcontrol.globals.urlfilter)"
if [ "${urlfilter}" -eq "1" ]; then
add_urlbundle "https://blocklistproject.github.io/Lists/alt-version/abuse-nl.txt" "Abuse"
add_urlbundle "https://blocklistproject.github.io/Lists/alt-version/ads-nl.txt" "Ads"
add_urlbundle "https://blocklistproject.github.io/Lists/alt-version/crypto-nl.txt" "Crypto"
add_urlbundle "https://blocklistproject.github.io/Lists/alt-version/drugs-nl.txt" "Drugs"
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/everything-nl.txt' "Everything else"
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/facebook-nl.txt' 'Facebook/Instagram'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/fraud-nl.txt' 'Fraud'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/gambling-nl.txt' 'Gambling'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/malware-nl.txt' 'Malware'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/phishing-nl.txt' 'Phishing'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/piracy-nl.txt' 'Piracy'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/porn-nl.txt' 'Porn'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/ransomware-nl.txt' 'Ransomware'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/redirect-nl.txt' 'Redirect'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/scam-nl.txt' 'Scam'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/tiktok-nl.txt' 'TikTok'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/torrent-nl.txt' 'Torrent'
add_urlbundle 'https://blocklistproject.github.io/Lists/alt-version/tracking-nl.txt' 'Tracking'
fi
exit 0

View File

@@ -1,14 +0,0 @@
#!/bin/sh
. /lib/functions.sh
[ ! -f "/etc/config/parentalcontrol" ] && exit 0
uci -q set parentalcontrol.globals.urlfilter='0'
_delete_urlbundle() {
uci_remove parentalcontrol "${1}"
}
config_load "parentalcontrol"
config_foreach _delete_urlbundle urlbundle

View File

@@ -0,0 +1,7 @@
#!/bin/sh
. /lib/functions.sh
[ ! -f "/etc/config/parentalcontrol" ] && exit 0
uci -q set parentalcontrol.globals.urlfilter='0'

View File

@@ -0,0 +1,40 @@
#!/bin/sh
COUNT=1
add_urlbundle()
{
local enabled name url
enabled="${1}"; shift
url="${1}"; shift
name="${@}"
uci -q set parentalcontrol.urlbundle_${COUNT}=urlbundle
uci -q set parentalcontrol.urlbundle_${COUNT}.enable="${enabled}"
uci -q set parentalcontrol.urlbundle_${COUNT}.name="${name}"
uci -q set parentalcontrol.urlbundle_${COUNT}.download_url="${url}"
COUNT="$((COUNT+1))"
}
add_urlbundle "0" "https://blocklistproject.github.io/Lists/alt-version/abuse-nl.txt" "Abuse"
add_urlbundle "0" "https://blocklistproject.github.io/Lists/alt-version/ads-nl.txt" "Ads"
add_urlbundle "0" "https://blocklistproject.github.io/Lists/alt-version/crypto-nl.txt" "Crypto"
add_urlbundle "1" "https://blocklistproject.github.io/Lists/alt-version/drugs-nl.txt" "Drugs"
add_urlbundle "0" 'https://blocklistproject.github.io/Lists/alt-version/everything-nl.txt' "Everything else"
add_urlbundle "1" 'https://blocklistproject.github.io/Lists/alt-version/facebook-nl.txt' 'Facebook/Instagram'
add_urlbundle "1" 'https://blocklistproject.github.io/Lists/alt-version/fraud-nl.txt' 'Fraud'
add_urlbundle "1" 'https://blocklistproject.github.io/Lists/alt-version/gambling-nl.txt' 'Gambling'
add_urlbundle "0" 'https://blocklistproject.github.io/Lists/alt-version/malware-nl.txt' 'Malware'
add_urlbundle "1" 'https://blocklistproject.github.io/Lists/alt-version/phishing-nl.txt' 'Phishing'
add_urlbundle "1" 'https://blocklistproject.github.io/Lists/alt-version/piracy-nl.txt' 'Piracy'
add_urlbundle "0" 'https://blocklistproject.github.io/Lists/alt-version/porn-nl.txt' 'Porn'
add_urlbundle "1" 'https://blocklistproject.github.io/Lists/alt-version/ransomware-nl.txt' 'Ransomware'
add_urlbundle "0" 'https://blocklistproject.github.io/Lists/alt-version/redirect-nl.txt' 'Redirect'
add_urlbundle "1" 'https://blocklistproject.github.io/Lists/alt-version/scam-nl.txt' 'Scam'
add_urlbundle "0" 'https://blocklistproject.github.io/Lists/alt-version/tiktok-nl.txt' 'TikTok'
add_urlbundle "0" 'https://blocklistproject.github.io/Lists/alt-version/torrent-nl.txt' 'Torrent'
add_urlbundle "0" 'https://blocklistproject.github.io/Lists/alt-version/tracking-nl.txt' 'Tracking'
exit 0

View File

@@ -2,8 +2,6 @@
. /lib/functions.sh
[ ! -f "/etc/config/urlfilter" ] && exit 0
# Convert URL filter to parental control format
urlfilter_config="/etc/config/urlfilter"
parentalcontrol_config="/etc/config/parentalcontrol"

View File

@@ -13,10 +13,7 @@ IP_RULE=""
ACL_FILE=""
parentalcontrol_ipv4_forward=""
parentalcontrol_ipv6_forward=""
bundle_path="$(uci -q get parentalcontrol.globals.bundle_path)"
default_bundle_dir="${bundle_path}/default/"
default_bundle_dir="/tmp/parentalcontrol/default/"
bundle_archive="/etc/parentalcontrol/urlbundles.tar.xz"
log() {
@@ -258,9 +255,7 @@ handle_schedule() {
schedule_added="1"
fi
# internet_access has been updated to be internet_break
# so drop traffic during the schedule, and allow outside the schedule
target="DROP"
target="ACCEPT"
config_get local_start_time "$schedule_section" "start_time" "00:00"
config_get duration "$schedule_section" "duration"
@@ -367,6 +362,11 @@ handle_internet_break() {
config_load "schedules"
config_foreach handle_schedule schedule "schedule" "$schedule_ref"
fi
# for access rule to work, need to have default drop rule as last rule
if [ "$schedule_added" = "1" ]; then
add_access_rule "$ACCESS_RULE" "" "" "" "DROP"
fi
done
}
@@ -438,31 +438,15 @@ add_internet_schedule_rules() {
}
add_iptables_nfqueue_rules() {
local filter_used
# Check if urlfilter used
if ! uci show parentalcontrol | grep -q profile_urlfilter; then
return
fi
# IPv4 rules
iptables -w -nL FORWARD | grep -iqE "NFQUEUE"
iptables -w -nL FORWARD|grep -iqE "NFQUEUE"
if [ "$?" -ne 0 ]; then
# capture DNS responses (UDP/TCP sport 53) in FORWARD
iptables -w -I FORWARD 1 -p tcp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -I FORWARD 1 -p udp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
# setup netfilter queue 0, use queue bypass so that if no application is
# listening to this queue then traffic is unaffected.
iptables -w -I FORWARD 1 -p tcp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -I FORWARD 1 -p udp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
# INPUT: DNS replies to router, skip loopback
iptables -w -I INPUT 1 -p tcp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -I INPUT 1 -p udp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
# OUTPUT: DNS replies from router, skip loopback
iptables -w -I OUTPUT 1 -p tcp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -I OUTPUT 1 -p udp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
# HTTP/HTTPS flows for urlfilter
iptables -w -I FORWARD 1 -p tcp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -I FORWARD 1 -p udp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -I INPUT 1 -p tcp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -I INPUT 1 -p udp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
# disable acceleration for https packets so that they can be read by urlfilter
ebtables --concurrent -A FORWARD -p ip --ip-protocol 6 --ip-destination-port 443 -j SKIPLOG 2> /dev/null
@@ -470,24 +454,14 @@ add_iptables_nfqueue_rules() {
ebtables --concurrent -A FORWARD -p ip --ip-protocol 17 --ip-source-port 53 -j SKIPLOG 2> /dev/null
fi
# IPv6 rules
ip6tables -w -nL FORWARD | grep -iqE "NFQUEUE"
ip6tables -w -nL FORWARD|grep -iqE "NFQUEUE"
if [ "$?" -ne 0 ]; then
# capture DNS responses (UDP/TCP sport 53) in FORWARD
ip6tables -w -I FORWARD 1 -p tcp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -I FORWARD 1 -p udp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
#ip6table rules
ip6tables -w -I FORWARD 1 -p tcp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -I FORWARD 1 -p udp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
# INPUT: DNS replies to router, skip loopback
ip6tables -w -I INPUT 1 -p tcp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -I INPUT 1 -p udp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
# OUTPUT: DNS replies from router, skip loopback
ip6tables -w -I OUTPUT 1 -p tcp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -I OUTPUT 1 -p udp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
# HTTP/HTTPS flows for urlfilter
ip6tables -w -I FORWARD 1 -p tcp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -I FORWARD 1 -p udp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -I INPUT 1 -p tcp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -I INPUT 1 -p udp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
# disable acceleration for https packets so that they can be read by urlfilter
ebtables --concurrent -A FORWARD -p ip6 --ip6-protocol 6 --ip6-destination-port 443 -j SKIPLOG 2> /dev/null
@@ -497,38 +471,26 @@ add_iptables_nfqueue_rules() {
}
remove_iptables_nfqueue_rules() {
iptables -w -nL FORWARD | grep -iqE "NFQUEUE"
iptables -w -nL FORWARD|grep -iqE "NFQUEUE"
if [ "$?" -eq 0 ]; then
# DNS response rules
iptables -w -D FORWARD -p tcp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D FORWARD -p udp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D INPUT -p tcp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D INPUT -p udp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D OUTPUT -p tcp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D OUTPUT -p udp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D FORWARD -p tcp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D FORWARD -p udp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
# HTTP/HTTPS
iptables -w -D FORWARD -p tcp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D FORWARD -p udp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D INPUT -p tcp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
iptables -w -D INPUT -p udp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
ebtables --concurrent -D FORWARD -p ip --ip-protocol 6 --ip-destination-port 443 -j SKIPLOG 2> /dev/null
ebtables --concurrent -D FORWARD -p ip --ip-protocol 6 --ip-source-port 53 -j SKIPLOG 2> /dev/null
ebtables --concurrent -D FORWARD -p ip --ip-protocol 17 --ip-source-port 53 -j SKIPLOG 2> /dev/null
fi
ip6tables -w -nL FORWARD | grep -iqE "NFQUEUE"
ip6tables -w -nL FORWARD|grep -iqE "NFQUEUE"
if [ "$?" -eq 0 ]; then
# DNS response rules
ip6tables -w -D FORWARD -p tcp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D FORWARD -p udp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D INPUT -p tcp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D INPUT -p udp --sport 53 ! -i lo -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D OUTPUT -p tcp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D OUTPUT -p udp --sport 53 ! -o lo -j NFQUEUE --queue-num 0 --queue-bypass
#ip6table rules
ip6tables -w -D FORWARD -p tcp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D FORWARD -p udp --match multiport --ports 80,443,53 -j NFQUEUE --queue-num 0 --queue-bypass
# HTTP/HTTPS
ip6tables -w -D FORWARD -p tcp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D FORWARD -p udp --match multiport --ports 80,443 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D INPUT -p tcp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
ip6tables -w -D INPUT -p udp --match multiport --ports 53 -j NFQUEUE --queue-num 0 --queue-bypass
ebtables --concurrent -D FORWARD -p ip6 --ip6-protocol 6 --ip6-destination-port 443 -j SKIPLOG 2> /dev/null
ebtables --concurrent -D FORWARD -p ip6 --ip6-protocol 6 --ip6-source-port 53 -j SKIPLOG 2> /dev/null
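As a quick way to confirm what these rules do at runtime, the checks below can be run on the box. This is only a sketch and assumes a standard Linux netfilter stack; the /proc path is present only when the nfnetlink_queue module is loaded.

# Same idempotence test the functions above use: are the NFQUEUE rules in place?
iptables -w -nL FORWARD --line-numbers | grep -i NFQUEUE
ip6tables -w -nL FORWARD --line-numbers | grep -i NFQUEUE
# Is a userspace program actually bound to queue 0? If nothing is listed here,
# --queue-bypass lets the matched traffic pass through unfiltered.
cat /proc/net/netfilter/nfnetlink_queue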

View File

@@ -3,31 +3,12 @@
. /lib/functions.sh
LOCKFILE="/tmp/sync_bundles.lock"
log_level="$(uci -q get parentalcontrol.globals.loglevel)"
log_level="${log_level:-1}"
DEBUG=0
log_err() {
logger -t urlfilter.sync -p error "$*"
if [ "${DEBUG}" -eq "1" ]; then
echo "#ERR# $* #" >/dev/console
fi
}
log_info() {
if [ "${log_level}" -gt 3 ]; then
logger -t urlfilter.sync -p info "$*"
fi
if [ "${DEBUG}" -eq "1" ]; then
echo "#INFO# $* #" >/dev/console
fi
}
# this script handles syncing bundles
# if it's a remote file, it will be downloaded and placed in bundle_dir
bundle_path="$(uci -q get parentalcontrol.globals.bundle_path)"
if [ -z "${bundle_path}" ]; then
return 0
bundle_path="/tmp/parentalcontrol"
fi
stringstore_dir="${bundle_path}/stringstore"
@@ -57,15 +38,15 @@ update_bundle_file_from_url() {
available_memory=$(df "$bundle_dir" | tail -n 1 | awk '{print $(NF-2)}') # Available memory in 1K blocks
local needed_blocks=$((bundle_file_size / 1024)) # Convert bundle_file_size to 1K blocks
local max_size=$((50 * 1024 * 1024)) # 50MB in bytes
local max_size=$((10 * 1024 * 1024)) # 10MB in bytes
if [ "$available_memory" -le "$needed_blocks" ]; then
log_info "Error: Not enough disk space for bundle: ${bundle_name}"
logger -p info "Error: Not enough disk space for bundle: ${bundle_name}"
return 1
fi
if [ "$bundle_file_size" -gt "$max_size" ]; then
log_info "update_bundle_file_from_url: Error: File size for ${bundle_name} exceeds 10MB"
logger -p info "update_bundle_file_from_url: Error: File size for ${bundle_name} exceeds 10MB"
return 1
fi
@@ -76,7 +57,7 @@ update_bundle_file_from_url() {
else
# Random delay (0-5s) before starting the download
local delay=$((RANDOM % 6))
log_info "update_bundle_file_from_url: Waiting ${delay}s before downloading..."
logger -p info "update_bundle_file_from_url: Waiting ${delay}s before downloading..."
sleep "$delay"
# Retry logic with exponential backoff
@@ -84,11 +65,12 @@ update_bundle_file_from_url() {
local attempt=1
local success=0
while [ $attempt -le 3 ]; do
if curl -s -o "$temp_file" "$download_url"; then
curl -s -o "$temp_file" "$download_url"
if [ $? -eq 0 ]; then
success=1
break
else
log_info "update_bundle_file_from_url: Download failed. Retrying $attempt ..."
logger -p info "update_bundle_file_from_url: Download failed. Retrying $attempt ..."
local backoff=$(( (2 ** attempt) + (RANDOM % 3) )) # Exponential backoff + 0-2s jitter
sleep "$backoff"
fi
@@ -96,7 +78,7 @@ update_bundle_file_from_url() {
done
if [ $success -ne 1 ]; then
log_info "update_bundle_file_from_url: Failed to download bundle: ${bundle_name}"
logger -p info "update_bundle_file_from_url: Failed to download bundle: ${bundle_name}"
rm -f "$temp_file"
return 1
fi
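For reference, the retry timing above works out roughly as follows (a sketch only: the jitter is RANDOM % 3, and it is assumed the attempt counter is advanced at the end of each pass through the loop):

# attempt 1 fails -> sleep 2^1 + 0..2s  (about 2-4s)
# attempt 2 fails -> sleep 2^2 + 0..2s  (about 4-6s)
# attempt 3 fails -> sleep 2^3 + 0..2s  (about 8-10s), then the temp file is
#                    removed and the function returns 1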
@@ -107,7 +89,7 @@ update_bundle_file_from_url() {
local final_path="${bundle_dir}/${bundle_file_name}"
if [[ "$file_path" =~ \.xz$ ]]; then
if ! xz -dc "$file_path" > "$final_path"; then
log_info "update_bundle_file_from_url: Decompression failed."
logger -p info "update_bundle_file_from_url: Decompression failed."
rm -f "$final_path"
rm -f "$file_path"
return 1
@@ -116,7 +98,7 @@ update_bundle_file_from_url() {
rm -f "$file_path"
elif [[ "$file_path" =~ \.gz$ ]]; then
if ! gzip -dc "$file_path" > "$final_path"; then
log_info "update_bundle_file_from_url: Decompression failed."
logger -p info "update_bundle_file_from_url: Decompression failed."
rm -f "$final_path"
rm -f "$file_path"
return 1
@@ -152,6 +134,7 @@ handle_download_url() {
local file_name="${sanitized_url##*/}" # Get everything after the last '/'
local bundle_file_name="${file_name}.urlbundle"
local unprocessed_file=0
local file_path="${sanitized_url#file://}"
if echo "$sanitized_url" | grep -qE "^https?://|^file://"; then
@@ -170,7 +153,7 @@ handle_download_url() {
fi
if [ -n "$previous_bundle_size" ] && [ "$bundle_file_size" -eq "$previous_bundle_size" ]; then
return 0
return
fi
if echo "$sanitized_url" | grep -q "^file://" && ! echo "$sanitized_url" | grep -Eq "\.(xz|gz)$"; then
@@ -178,7 +161,7 @@ handle_download_url() {
sed -i "/^${bundle_file_name} /d" "$bundle_sizes"
echo "$bundle_file_name $bundle_file_size" >> "$bundle_sizes"
ubus send "parentalcontrol.bundle.update" "{\"bundle_file_path\":\"${file_path}\",\"bundle_name\":\"${bundle_name}\"}"
return 0
return
fi
# Remove existing entries
@@ -190,9 +173,11 @@ handle_download_url() {
update_bundle_file_from_url "$sanitized_url" "$bundle_file_name" "$bundle_file_size" "$bundle_name" "$file_name"
return $?
else
log_info "Error: Unsupported URL format for ${bundle_file_name}"
logger -p info "Error: Unsupported URL format for ${bundle_file_name}"
return 1
fi
return 0
}
cleanup_bundle_files() {
@@ -204,7 +189,7 @@ cleanup_bundle_files() {
get_download_url() {
local section="$1"
config_get url "$section" download_url
config_get_bool enable "$1" enable 1
config_get_bool enable "$1" enable 0
if [ "${enable}" -eq 0 ]; then
# bundle is disabled
@@ -237,56 +222,46 @@ cleanup_bundle_files() {
done
}
cleanup_bundle_sizes() {
downloaded_bundle_names="$(cat "$bundle_sizes" | cut -d '.' -f 1)"
for name in $downloaded_bundle_names; do
if ls ${stringstore_dir}/${name}* 2>&1 | grep -qF '.store'; then
if ls ${stringstore_dir}/${name}* 2>&1 | grep -q cmph; then
continue
fi
fi
sed -i "/$name/d" "$bundle_sizes"
done
}
# Main handler for all profile URL bundles
handle_filter_for_bundles() {
local urlfilter
urlfilter="$(uci -q get parentalcontrol.globals.urlfilter)"
# if urlfilter is not enabled, then return
if [ "${urlfilter}" -ne "1" ]; then
log_info "urlfilter feature not enabled"
return 0
logger -p info "urlbundle not supported"
return
fi
ubus -t 20 wait_for bbfdm.parentalcontrol
if [ "$?" -ne 0 ]; then
logger -p error "bbfdm.parentalcontrol object not found"
return
fi
initialize_environment
cleanup_bundle_files "$bundle_dir"
cleanup_bundle_files "$stringstore_dir"
cleanup_bundle_sizes
config_load parentalcontrol
config_get_bool enable globals enable 0
if [ "${enable}" -eq 0 ]; then
log_info "parental-control feature not enabled"
# Parental control is disabled
return 0
fi
check_bundle_exists() {
local enable download_url name cfg
local profile enable bundles bundle_name download_url
cfg="$1"
check_bundle_exists() {
local cfg="$1"
config_get name "$cfg" name
config_get_bool enable "$cfg" enable 1
config_get_bool enable "$cfg" enable 0
config_get download_url "$cfg" download_url
if [ "${enable}" -eq 0 ]; then
log_info "Skipping bundle ${name} not enabled"
# bundle is disabled
return 0
fi
@@ -307,6 +282,6 @@ handle_filter_for_bundles() {
# Open file descriptor 200 for locking
exec 200>"$LOCKFILE"
# Try to acquire an exclusive lock; exit if another instance is running
flock -n 200 || { log_info "sync_bundles.sh is already running, exiting."; exit 1; }
flock -n 200 || { logger -p info "sync_bundles.sh is already running, exiting."; exit 1; }
handle_filter_for_bundles
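When debugging bundle syncing, the knobs and events this script touches can be inspected directly. A minimal sketch; the option names are the ones read above, while values and event payloads are device-specific:

# Globals consulted by sync_bundles.sh
uci -q get parentalcontrol.globals.enable       # parental control on/off
uci -q get parentalcontrol.globals.urlfilter    # URL-filter feature on/off
uci -q get parentalcontrol.globals.bundle_path  # where bundles are stored
uci -q get parentalcontrol.globals.loglevel     # values above 3 enable verbose info logging
# Watch the event emitted when a bundle file is downloaded or replaced
ubus listen parentalcontrol.bundle.update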

View File

@@ -434,6 +434,4 @@ hw_commit_all() {
/userfs/bin/ifc add vip pbit $pbit
done
fi
hw_nat -! > /dev/null 2>&1
}

View File

@@ -22,10 +22,6 @@ ip_rule_get_converted_tos() {
echo $con_tos
}
flush_hw_nat() {
hw_nat -! > /dev/null 2>&1
}
configure_qos() {
# queue configuration is being done after shaper configuration,
# If port shapingrate configuration on DISC device is called after queue configuration then
@@ -37,9 +33,8 @@ configure_qos() {
configure_policer
configure_classify
if [ -f "/tmp/qos/classify.ebtables" ]; then
sh /tmp/qos/classify.ebtables
sh /tmp/qos/classify.ebtables
fi
flush_hw_nat
}
reload_qos() {
@@ -70,7 +65,6 @@ reload_qos() {
;;
esac
hw_commit_all
flush_hw_nat
}
reload_qos_service() {

View File

@@ -14,13 +14,11 @@ PREV_LINKSPEED=$(cat ${LINKSPEED_FILE} 2>/dev/null)
[ -z "${PREV_LINKSPEED}" ] && PREV_LINKSPEED=0
if [ $((LINKSPEED)) -ne $((PREV_LINKSPEED)) -a $((LINKSPEED)) -ne 0 ]; then
if [ $((LINKSPEED)) -ge 100 ]; then
if [ $((LINKSPEED)) -ge 10000 ]; then
/userfs/bin/qosrule discpline Rate uplink-bandwidth $((LINKSPEED*1000*999/1000))
else
/userfs/bin/qosrule discpline Rate uplink-bandwidth $((LINKSPEED*1000*990/1000))
/userfs/bin/qosrule discpline Rate uplink-bandwidth $((LINKSPEED*1000))
fi
mkdir -p "/tmp/qos"
echo ${LINKSPEED} > ${LINKSPEED_FILE}
hw_nat -! > /dev/null 2>&1
fi
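As a sanity check of the shaping math above, a sketch assuming LINKSPEED is reported in Mbps and qosrule takes its rate in kbps (which is what the x1000 scaling suggests):

LINKSPEED=1000                        # e.g. a 1 Gbps uplink
echo $((LINKSPEED*1000*999/1000))     # 999000 kbps -> 99.9% of line rate
echo $((LINKSPEED*1000*990/1000))     # 990000 kbps -> 99.0% of line rate
echo $((LINKSPEED*1000))              # 1000000 kbps -> full line rate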

View File

@@ -1,30 +1,11 @@
. /lib/functions.sh
handle_interface() {
local config="${1}"
local prefix="${2}"
config_get ifname "${config}" ifname
config_get mode ${config} mode
interfaces=$(uci show wireless | grep "ifname=" | awk -F'[.,=]' '{print$2}')
for int in $interfaces; do
mode=$(uci get "wireless.${int}.mode")
if [ "$mode" = "ap" ] ; then
echo "Get assoc list for ${ifname}"
ubus call "${prefix}.${ifname}" assoclist
echo "Get station info for ${ifname}"
ubus call "${prefix}.${ifname}" stations
ap_int=$(uci get "wireless.${int}.ifname")
echo "Get assoc list for ${ap_int}"
ubus call "wifi.ap.${ap_int}" assoclist
echo "Get station info for ${ap_int}"
ubus call "wifi.ap.${ap_int}" stations
fi
}
handle_wifi_interface() {
handle_interface "$1" "wifi.ap"
}
handle_mld_interface() {
handle_interface "$1" "wifi.apmld"
}
config_load wireless
echo "Get associated stations information for non-MLD interfaces"
config_foreach handle_wifi_interface wifi-iface
echo "Get associated stations information for MLD interfaces"
config_foreach handle_mld_interface wifi-mld
done
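The same objects this helper iterates over can be queried by hand when debugging. A sketch only; wl0 is a placeholder interface name:

ubus list 'wifi.ap.*'                 # available AP objects
ubus list 'wifi.apmld.*'              # available MLD AP objects, if any
ubus call wifi.ap.wl0 assoclist       # hypothetical interface name
ubus call wifi.ap.wl0 stations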

View File

@@ -22,4 +22,3 @@ handle_interface() {
config_load wireless
config_foreach handle_interface wifi-iface
config_foreach handle_interface wifi-mld

View File

@@ -1,36 +0,0 @@
. /lib/functions.sh
handle_interface() {
local config="${1}"
local prefix="${2}"
config_get ifname "${config}" ifname
config_get mode ${config} mode
if [ "$mode" = "ap" ] ; then
echo "Get status for ${ifname}"
ubus call "${prefix}.${ifname}" status
elif [ "$mode" = "sta" ] ; then
echo "Get status for bSTA ${ifname}"
ubus call "wifi.bsta.${ifname}" status
fi
}
handle_wifi_interface() {
handle_interface "$1" "wifi.ap"
}
handle_mld_interface() {
handle_interface "$1" "wifi.apmld"
}
config_load wireless
echo "Get wifi status"
ubus call wifi status
echo "Get wifi status for ap & bsta interfaces"
config_foreach handle_wifi_interface wifi-iface
echo "Get wifi status for apmld interfaces"
config_foreach handle_mld_interface wifi-mld

View File

@@ -94,16 +94,6 @@
"file": "/tmp/dhcp.client.options"
}
]
},
{
"description": "Firewall fw3 reload status",
"cmd": "cat /var/log/firewall.log",
"dependency" : [
{
"type": "file",
"file": "/var/log/firewall.log"
}
]
}
]
}

View File

@@ -13,8 +13,7 @@
},
{
"description": "WiFi Status",
"cmd": "sh /usr/share/self-diagnostics/helper/wifi_status.sh",
"timeout": 10
"cmd": "ubus call wifi status"
},
{
"description": "WiFi Radio Status",

View File

@@ -82,15 +82,22 @@ case "$1" in
# if pid equals server pid then skip
[ "$session_pid" -eq "$server_pid" ] && continue
network_info="$(get_network_info "$session_pid" "$server_pid")"
if [ $? -eq 0 ]; then
ip=$(echo "$network_info" | cut -d' ' -f1)
port=$(echo "$network_info" | cut -d' ' -f2)
# get this session's ppid
session_ppid="$(grep PPid /proc/$session_pid/status | awk '{print $2}')"
# if session's parent is this server
if [ "$session_ppid" -eq "$server_pid" ]; then
session="$(netstat -ntp | grep $session_pid/ | awk '{print $5,$7}' | cut -d'/' -f 1)"
# return session IP, Port, PID
ip=$(echo "$session" | cut -d ':' -f 1)
port=$(echo "$session" | cut -d ':' -f 2 | cut -d ' ' -f 1)
pid=$(echo "$session" | cut -d ':' -f 2 | cut -d ' ' -f 2)
json_add_object
json_add_string "ip" "$ip"
json_add_string "port" "$port"
json_add_string "pid" "$session_pid"
json_add_string "pid" "$pid"
json_close_object
fi
done
@@ -135,14 +142,10 @@ case "$1" in
[ "$session_pid" -eq "$server_pid" ] && continue
# get this session's ppid
session_ppid="$(grep PPid /proc/$session_pid/status 2>/dev/null | awk '{print $2}')"
[ -z "$session_ppid" ] && continue
session_ppid="$(grep PPid /proc/$session_pid/status | awk '{print $2}')"
# get the parent of the parent (the grandparent)
grandparent_pid="$(grep PPid /proc/$session_ppid/status 2>/dev/null | awk '{print $2}')"
# if session's parent or grandparent is this server
if [ "$session_ppid" -eq "$server_pid" ] || { [ -n "$grandparent_pid" ] && [ "$grandparent_pid" -eq "$server_pid" ]; }; then
# if session's parent is this server
if [ "$session_ppid" -eq "$server_pid" ]; then
kill -15 "$session_pid"
fi
done

View File

@@ -17,26 +17,5 @@ get_session_pids()
{
local PidFile="$1"
echo "$(ps -w | grep "$PidFile" | grep -v grep | awk '{print $1}')"
}
get_network_info()
{
local session_pid="$1"
local server_pid="$2"
# get this session's ppid
session_ppid="$(grep PPid /proc/$session_pid/status | awk '{print $2}')"
# if session's parent is this server
if [ "$session_ppid" -eq "$server_pid" ]; then
session="$(netstat -ntp | grep "$session_pid/" | awk '{print $5,$7}' | cut -d'/' -f 1)"
if [ -n "$session" ]; then
ip=$(echo "$session" | cut -d ':' -f 1)
port=$(echo "$session" | cut -d ':' -f 2 | cut -d ' ' -f 1)
echo "$ip $port"
return 0
fi
fi
return 1
echo "$(ps -w | grep $PidFile | grep -v grep | awk '{print $1}')"
}
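The session lookup above relies on the usual column layout of netstat -ntp. A sketch of the mapping; the example line and program name are illustrative only:

#   Proto Recv-Q Send-Q Local Address    Foreign Address      State        PID/Program
#   tcp   0      0      192.168.1.1:22   192.168.1.100:51514  ESTABLISHED  1234/dropbear
# awk '{print $5,$7}' keeps "Foreign Address" and "PID/Program";
# cut -d'/' -f1 then drops the program name, leaving "client_ip:port pid".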

View File

@@ -17,39 +17,3 @@ get_session_pids()
{
echo "$(ps -w | grep sshd | grep pts | awk '{print $1}')"
}
get_network_info()
{
local session_pid="$1"
local server_pid="$2"
local session_info=""
local ip=""
local port=""
# get this session's ppid
session_ppid="$(grep PPid /proc/$session_pid/status 2>/dev/null | awk '{print $2}')"
[ -z "$session_ppid" ] && return 1
# get the parent of the parent (the grandparent)
grandparent_pid="$(grep PPid /proc/$session_ppid/status 2>/dev/null | awk '{print $2}')"
# if session's parent, or grandparent, is this server
if [ "$session_ppid" -eq "$server_pid" ] || { [ -n "$grandparent_pid" ] && [ "$grandparent_pid" -eq "$server_pid" ]; }; then
# The connection is usually handled by the parent process for openssh
session_info="$(netstat -ntp 2>/dev/null | awk -v pid="$session_ppid" '$7 ~ "^"pid"/" {print $5}')"
# If not found on parent, check the child process itself (less common for openssh)
if [ -z "$session_info" ]; then
session_info="$(netstat -ntp 2>/dev/null | awk -v pid="$session_pid" '$7 ~ "^"pid"/" {print $5}')"
fi
if [ -n "$session_info" ]; then
ip=$(echo "$session_info" | cut -d ':' -f 1)
port=$(echo "$session_info" | cut -d ':' -f 2)
echo "$ip $port"
return 0
fi
fi
return 1
}

View File

@@ -5,11 +5,11 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=sulu-base
PKG_VERSION:=5.1.2
PKG_VERSION:=4.1.5
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/websdk/sulu.git
PKG_SOURCE_VERSION:=6ee43863415b54c312a56e113a7a91d5ae25df28
PKG_SOURCE_VERSION:=359894b8bfbdd595c547f01c6a173a89a973ba8e
PKG_MIRROR_HASH:=skip
SULU_MOD:=core

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=sulu-builder
PKG_VERSION:=5.1.2
PKG_VERSION:=4.1.5
PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/websdk/sulu-builder.git
PKG_SOURCE_VERSION:=cca6a777e46584c888a1a06fafa75f7a063b803d
PKG_SOURCE_VERSION:=006e7cc0cb091d5fe9c68019020906946b0ce7f4
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_SOURCE_VERSION)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_VERSION).tar.gz
PKG_BUILD_DIR:=$(BUILD_DIR)/sulu-$(PKG_VERSION)/sulu-builder-$(PKG_SOURCE_VERSION)
@@ -28,8 +28,7 @@ define Package/sulu/default
CATEGORY:=Utilities
SUBMENU:=SULU
TITLE:=SULU-CE
DEPENDS:=+mosquitto-auth-shadow +usermngr +userinterface +obuspa +sulu-vendorext
DEPENDS+=+@OBUSPA_LOCAL_MQTT_LISTENER
DEPENDS:=+mosquitto-auth-shadow +usermngr +jq +userinterface +obuspa +qrencode
EXTRA_DEPENDS:=nginx
endef
@@ -96,25 +95,24 @@ endef
define Package/sulu/install/Default
$(INSTALL_DIR) $(1)/sulu/
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_BIN) ./files/etc/config/sulu $(1)/etc/config/sulu
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/etc/init.d/sulu $(1)/etc/init.d/sulu
$(INSTALL_DIR) $(1)/etc/sulu
$(INSTALL_DATA) ./files/maintenance.html $(1)/sulu/
$(LN) /tmp/sulu $(1)/sulu/connection
$(INSTALL_BIN) ./files/etc/sulu/sulu.sh $(1)/etc/sulu/
$(INSTALL_DATA) ./files/etc/sulu/nginx.locations $(1)/etc/sulu/
$(INSTALL_BIN) ./files/etc/sulu/sulu_watcher.sh $(1)/etc/sulu/
$(INSTALL_DIR) $(1)/etc/users/roles
$(INSTALL_DATA) ./files/etc/users/roles/*.json $(1)/etc/users/roles/
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_DATA) ./files/etc/uci-defaults/40-add-sulu-config $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/40-add-sulu-config $(1)/etc/uci-defaults/
ifneq ($(CONFIG_SULU_DEFAULT_UI)$(CONFIG_SULU_BUILDER_DEFAULT_UI),)
$(INSTALL_DATA) ./files/etc/uci-defaults/41-make-sulu-default-ui $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/41-make-sulu-default-ui $(1)/etc/uci-defaults/
endif
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/etc/init.d/sulu $(1)/etc/init.d/
endef
define Package/sulu/install/Post
@@ -130,6 +128,7 @@ define Package/sulu/install
$(Package/sulu/install/Post)
endef
define Package/sulu-builder/install
$(Package/sulu/install/Default)
$(INSTALL_DIR) $(1)/sulu/presets

View File

@@ -0,0 +1,2 @@
config global 'global'
option enabled '1'
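A minimal usage sketch for toggling this flag (section and option names as declared above):

uci set sulu.global.enabled='0'   # disable the sulu service
uci commit sulu
/etc/init.d/sulu reload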

sulu/sulu-builder/files/etc/init.d/sulu Normal file → Executable file
View File

@@ -1,15 +1,48 @@
#!/bin/sh /etc/rc.common
START=9
STOP=01
USE_PROCD=1
PROG=/etc/sulu/sulu_watcher.sh
START=90
STOP=01
start_service()
{
procd_open_instance "sulu"
procd_set_param command ${PROG}
procd_close_instance "sulu"
. /lib/functions.sh
log() {
echo "${@}" | logger -t sulu.init -p debug
}
validate_sulu_global_section() {
uci_validate_section sulu global global \
'enabled:bool:1'
}
start_service() {
local enabled
config_load sulu
procd_open_instance sulu
validate_sulu_global_section || return 0
# append sulu connection injection
if [ "${enabled}" -eq "0" ]; then
procd_close_instance
return 0
fi
if [ "${1}" = "update" ]; then
log "Reloading related services"
/etc/sulu/sulu.sh -r
fi
/etc/sulu/sulu.sh -q
procd_close_instance
}
reload_service() {
stop
start update
}
service_triggers() {
procd_add_reload_trigger "sulu" "userinterface" "mosquitto"
procd_add_reload_trigger "config.change" "mapcontroller" /etc/sulu/sulu.sh -q
}
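To see what procd registered for the service and its triggers, something like the following can be used; a sketch, and the exact output depends on the procd version:

ubus call service list '{"name":"sulu"}'   # registered instances and triggers
/etc/init.d/sulu reload                    # runs reload_service(): stop, then start update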

Some files were not shown because too many files have changed in this diff.