Compare commits


4 Commits

Author | SHA1 | Message | Date
Jakob Olsson | b6a542e151 | map-agent: introduce config option that passes PERSIST_CONTROLLER CLFLAG | 2024-06-03 16:41:15 +02:00
Jakob Olsson | 9c81aacd88 | map-agent: 6.1.1.2 | 2024-06-03 16:38:17 +02:00
Jakob Olsson | 298d4dcb97 | map-controller: 6.1.1.2 | 2024-06-03 16:33:02 +02:00
Jakob Olsson | 190544d3c4 | map-agent: map_genconfig: don't modify mapcontroller enabled unconditionally | 2024-06-03 16:31:14 +02:00
140 changed files with 2196 additions and 2751 deletions

View File

@@ -1,4 +1,3 @@
if PACKAGE_libbbfdm
config BBF_VENDOR_LIST
string "Vendor List"
default "iopsys"
@@ -14,4 +13,3 @@ config BBF_OBFUSCATION_KEY
config BBF_MAX_OBJECT_INSTANCES
int "Maximum number of instances per object"
default 255
endif

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=bbfdm
PKG_VERSION:=1.11.5
PKG_VERSION:=1.8.26
USE_LOCAL:=0
ifneq ($(USE_LOCAL),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/bbfdm.git
PKG_SOURCE_VERSION:=ebdb414e098228ca4fb2c266eb58dee8e4b228a0
PKG_SOURCE_VERSION:=6df8fecbcc72e55f2ba06ae933dcddb6f725b204
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -32,21 +32,12 @@ define Package/libbbfdm-api
ABI_VERSION:=1.0
endef
define Package/libbbfdm-ubus
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=BBF datamodel ubus library, provides API to expose datamodel over ubus
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api
endef
define Package/libbbfdm
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=Library for broadband forum data model support
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api +libopenssl
MENU:=1
endef
define Package/bbfdmd
@@ -54,40 +45,19 @@ define Package/bbfdmd
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=Datamodel ubus backend
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libbbfdm-api +libbbfdm-ubus +libbbfdm +jq +bbf_configmngr
endef
define Package/bbf_configmngr
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:= BBF Config Manager
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json
MENU:=1
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libbbfdm-api +libbbfdm +jq
endef
define Package/libbbfdm/config
source "$(SOURCE)/Config_bbfdm.in"
endef
define Package/bbf_configmngr/config
source "$(SOURCE)/bbf_configmngr.in"
endef
define Package/libbbfdm-api/description
Library contains the API(UCI, UBUS, JSON, CLI and Browse) of libbbfdm
endef
define Package/libbbfdm-ubus/description
Library contains the APIs to expose data model over ubus
endef
define Package/libbbfdm/description
Library contains the data model tree, It includes basic TR181 nodes.
endef
define Package/bbf_configmngr/description
Daemon for handling bbf reload services via ubus bbf.config
Library contains the data model tree. It includes TR181, TR143 data models
endef
ifeq ($(USE_LOCAL),1)
@@ -112,42 +82,37 @@ endif
define Package/libbbfdm-api/install
$(INSTALL_DIR) $(1)/lib
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/libbbfdm-api.so $(1)/lib/
$(INSTALL_DIR) $(1)/usr/libexec/rpcd
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/libexec/rpcd/bbf.secure $(1)/usr/libexec/rpcd/bbf.secure
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/libexec/rpcd/bbf.diag $(1)/usr/libexec/rpcd/bbf.diag
$(INSTALL_DIR) $(1)/usr/share/bbfdm/scripts/
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/share/bbfdm/scripts/bbf_api $(1)/usr/share/bbfdm/scripts/
$(INSTALL_DIR) $(1)/etc/bbfdm/certificates
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/scripts/bbf.secure $(1)/usr/libexec/rpcd/bbf.secure
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/scripts/bbf.config $(1)/usr/libexec/rpcd/bbf.config
$(INSTALL_DIR) $(1)/etc/bbfdm
echo "$(CONFIG_BBF_OBFUSCATION_KEY)" > $(1)/etc/bbfdm/.secure_hash
endef
define Package/libbbfdm-ubus/install
$(INSTALL_DIR) $(1)/lib
$(CP) $(PKG_BUILD_DIR)/libbbfdm-ubus/libbbfdm-ubus.so $(1)/lib/
$(INSTALL_DIR) $(1)/etc/bbfdm/certificates
endef
define Package/libbbfdm/install
$(INSTALL_DIR) $(1)/lib
$(INSTALL_DIR) $(1)/etc/bbfdm
$(INSTALL_DIR) $(1)/etc/bbfdm/dmmap
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_DIR) $(1)/lib/upgrade/keep.d
$(INSTALL_DIR) $(1)/usr/share/bbfdm/
$(CP) $(PKG_BUILD_DIR)/libbbfdm/libbbfdm.so $(1)/usr/share/bbfdm/libbbfdm.so
$(INSTALL_DIR) $(1)/lib/upgrade/keep.d
$(INSTALL_DATA) ./files/lib/upgrade/keep.d/bbf $(1)/lib/upgrade/keep.d/bbf
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_BIN) ./files/etc/uci-defaults/95-portmap-firewall $(1)/etc/uci-defaults/95-portmap-firewall
$(INSTALL_BIN) ./files/etc/uci-defaults/97-firewall-service $(1)/etc/uci-defaults/97-firewall-service
$(INSTALL_BIN) ./files/etc/uci-defaults/99-link-core-plugins $(1)/etc/uci-defaults/99-link-core-plugins
$(INSTALL_BIN) ./files/etc/uci-defaults/90-remove-nonexisting-microservices $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/91-fix-bbfdmd-enabled-option $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/firewall.portmap $(1)/etc/firewall.portmap
$(INSTALL_BIN) ./files/etc/firewall.service $(1)/etc/firewall.service
ifeq ($(findstring iopsys,$(CONFIG_BBF_VENDOR_LIST)),iopsys)
$(BBFDM_INSTALL_CORE_PLUGIN) $(PKG_BUILD_DIR)/libbbfdm/dmtree/vendor/iopsys/libbbfdm_iopsys_ext.so $(1)
endif
$(INSTALL_DIR) $(1)/usr/share/bbfdm/scripts/
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/share/bbfdm/scripts/bbf_activate_handler.sh $(1)/usr/share/bbfdm/scripts/
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/share/bbfdm/scripts/bbf_check_idle.sh $(1)/usr/share/bbfdm/scripts/
$(INSTALL_DIR) $(1)/usr/libexec/rpcd
$(CP) $(PKG_BUILD_DIR)/libbbfdm/scripts/* $(1)/usr/share/bbfdm/scripts/
$(LN) /usr/share/bbfdm/scripts/bbf.diag $(1)/usr/libexec/rpcd/bbf.diag
endef
define Package/libbbfdm/prerm
@@ -170,33 +135,14 @@ define Package/bbfdmd/install
$(INSTALL_BIN) ./files/etc/hotplug.d/iface/85-bbfdm-sysctl $(1)/etc/hotplug.d/iface/85-bbfdm-sysctl
endef
define Package/bbf_configmngr/install
$(INSTALL_DIR) $(1)/etc/init.d
ifeq ($(CONFIG_BBF_CONFIGMNGR_C_BACKEND),y)
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/utilities/bbf_configd $(1)/usr/sbin/
$(INSTALL_BIN) ./files/etc/init.d/bbf_configd $(1)/etc/init.d/bbf_configd
endif
ifeq ($(CONFIG_BBF_CONFIGMNGR_SCRIPT_BACKEND),y)
$(INSTALL_DIR) $(1)/usr/libexec/rpcd
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/libexec/rpcd/bbf.config $(1)/usr/libexec/rpcd/bbf.config
endif
endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/usr/include
$(INSTALL_DIR) $(1)/usr/include/libbbfdm-api
$(INSTALL_DIR) $(1)/usr/include/libbbfdm-ubus
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-api/*.h $(1)/usr/include/libbbfdm-api/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-ubus/bbfdm-ubus.h $(1)/usr/include/libbbfdm-ubus/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-api/include/*.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/libbbfdm-api.so $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/libbbfdm-ubus/libbbfdm-ubus.so $(1)/usr/lib
endef
$(eval $(call BuildPackage,bbf_configmngr))
$(eval $(call BuildPackage,libbbfdm-api))
$(eval $(call BuildPackage,libbbfdm-ubus))
$(eval $(call BuildPackage,libbbfdm))
$(eval $(call BuildPackage,bbfdmd))

View File

@@ -1,19 +0,0 @@
if PACKAGE_bbf_configmngr
choice
prompt "Select backend daemon for bbf.config"
default BBF_CONFIGMNGR_C_BACKEND
help
Select which backend daemon should be used for ubus bbf.config
config BBF_CONFIGMNGR_SCRIPT_BACKEND
bool "Use shell script backend"
help
Enable this option to use shell script as the backend for bbf.config. This can be useful for quick and easy scripting of configuration tasks.
config BBF_CONFIGMNGR_C_BACKEND
bool "Use C code backend"
help
Enable this option to use a C code implementation as the backend for bbf.config. This option is generally preferred for performance-critical tasks and scenarios requiring more robust and efficient handling.
endchoice
endif

View File

@@ -1,8 +1,11 @@
config bbfdmd 'bbfdmd'
option enable '1'
option loglevel '3'
option loglevel '1'
option refresh_time '120'
option transaction_timeout '30'
option subprocess_level '2'
config micro_services 'micro_services'
option enable '1'
option enable_core '0'
option enable_respawn '1'

View File

@@ -1,25 +0,0 @@
#!/bin/sh /etc/rc.common
START=64
STOP=10
USE_PROCD=1
PROG=/usr/sbin/bbf_configd
create_needed_directories()
{
mkdir -p /tmp/bbfdm/.cwmp
mkdir -p /tmp/bbfdm/.usp
mkdir -p /tmp/bbfdm/.bbfdm
}
start_service()
{
create_needed_directories
procd_open_instance "bbf_configd"
procd_set_param command ${PROG}
#procd_append_param command -d
procd_set_param respawn
procd_close_instance "bbf_configd"
}

View File

@@ -18,16 +18,18 @@ validate_bbfdm_micro_service_section()
{
uci_validate_section bbfdm micro_services "micro_services" \
'enable:bool:true' \
'enable_core:bool:false'
'enable_core:bool:false' \
'enable_respawn:bool:true'
}
_add_microservice()
{
local name path
local enable enable_core
local enable enable_core enable_respawn
# Check enable from micro-service
path="${1}"
enable_respawn="${2}"
enable_core="${3}"
name="$(basename ${path})"
@@ -50,7 +52,9 @@ _add_microservice()
procd_set_param stderr 1
fi
procd_set_param respawn
if [ "${enable_respawn}" -eq "1" ]; then
procd_set_param respawn "3600" "5" "5"
fi
procd_close_instance "${name}"
}
@@ -73,7 +77,7 @@ configure_bbfdm_micro_services()
do
[ -e "$file" ] || continue
_add_microservice $file "${enable_core}"
_add_microservice $file "${enable_respawn}" "${enable_core}"
done
fi
}
@@ -88,7 +92,7 @@ _start_single_service()
file="$(ls -1 ${BBFDM_MICROSERVICE_DIR}/${service}.json)"
[ -e "$file" ] || return
_add_microservice $file "0"
_add_microservice $file "0" "0"
fi
}
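A minimal usage sketch of the new respawn toggle (values are examples; the option name comes from the validation block above). Disabling automatic respawn for all micro-service instances would look like:
uci set bbfdm.micro_services.enable_respawn='0'
uci commit bbfdm
# then reload the owning init script (not shown in this hunk) for the change to apply
With respawn left enabled (the default), procd restarts a crashed micro-service; the arguments "3600" "5" "5" correspond to procd's respawn threshold, timeout and retry count.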

View File

@@ -22,6 +22,8 @@ validate_bbfdm_bbfdmd_section()
'sock:string' \
'debug:bool:false' \
'loglevel:uinteger:1' \
'refresh_time:uinteger:0' \
'transaction_timeout:uinteger:30' \
'subprocess_level:uinteger'
}
@@ -39,7 +41,7 @@ configure_bbfdmd()
[ "${enable}" -eq 0 ] && return 0
if [ -f "${BBFDM_JSON_INPUT}" ]; then
echo "$(jq --arg log ${loglevel} --arg level ${subprocess_level} '.daemon.config += {"loglevel": $log, "subprocess_level": $level}' ${BBFDM_JSON_INPUT})" > "${BBFDM_TEMP_DIR}/input.json"
echo "$(jq --arg log ${loglevel} --arg tran ${transaction_timeout} --arg refresh ${refresh_time} --arg level ${subprocess_level} '.daemon.config += {"loglevel": $log, "refresh_time": $refresh, "transaction_timeout": $tran, "subprocess_level": $level}' ${BBFDM_JSON_INPUT})" > "${BBFDM_TEMP_DIR}/input.json"
fi
procd_set_param command ${PROG}
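To see what the extended jq call above produces, a standalone sketch (file name and values are placeholders):
jq --arg log 1 --arg tran 30 --arg refresh 120 --arg level 2 \
  '.daemon.config += {"loglevel": $log, "refresh_time": $refresh, "transaction_timeout": $tran, "subprocess_level": $level}' input.json
# -> {"daemon":{"config":{"loglevel":"1","refresh_time":"120","transaction_timeout":"30","subprocess_level":"2"}}}
# note: --arg always injects string values, so the daemon presumably parses them back into numbers.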

View File

@@ -0,0 +1,18 @@
#!/bin/sh
. /lib/functions.sh
remove_nonexisting_microservice() {
local input_json
config_get input_json "$1" input_json ""
if [ -z "${input_json}" ]; then
uci_remove bbfdm "${1}"
fi
}
config_load bbfdm
config_foreach remove_nonexisting_microservice "micro_service"
exit 0
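A sketch of the effect, using a hypothetical stale section "obsolete_ms" left behind after a micro-service package was removed (section names and path are illustrative):
# before: bbfdm.obsolete_ms=micro_service                                  <- no input_json option, removed
#         bbfdm.dnsmngr=micro_service
#         bbfdm.dnsmngr.input_json='/path/to/dnsmngr.json'                 <- kept
# after:  only sections that still reference an input_json file survive the uci_remove above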

View File

@@ -0,0 +1,34 @@
#!/bin/sh
UNIFIED_PATH="/usr/share/bbfdm/plugins/"
log() {
echo "$@" | logger -t bbfdm.uci-default -p info
}
# Link JSON plugins
for f in `ls -1 /etc/bbfdm/json/*.json`; do
log "# BBFDM JSON plugin ${f} not aligned #"
ln -s ${f} "${UNIFIED_PATH}"
done
# Link DotSo plugins
for f in `ls -1 /usr/lib/bbfdm/*.so`; do
log "# BBFDM DotSO plugin ${f} not aligned #"
ln -s ${f} "${UNIFIED_PATH}"
done
# Link JSON plugins
for f in `ls -1 /etc/bbfdm/plugins/*.json`; do
log "# BBFDM JSON plugin ${f} not aligned #"
ln -s ${f} "${UNIFIED_PATH}"
done
# Link DotSo plugins
for f in `ls -1 /etc/bbfdm/plugins/*.so`; do
log "# BBFDM DotSO plugin ${f} not aligned #"
ln -s ${f} "${UNIFIED_PATH}"
done
exit 0
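For illustration, assuming a hypothetical plugin /etc/bbfdm/plugins/libexample.so exists on the device, the loops above leave a symlink such as:
ls -l /usr/share/bbfdm/plugins/libexample.so
# lrwxrwxrwx ... /usr/share/bbfdm/plugins/libexample.so -> /etc/bbfdm/plugins/libexample.so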

View File

@@ -1,11 +0,0 @@
if PACKAGE_bridgemngr
menu "Configuration"
config BRIDGEMNGR_BRIDGE_VLAN
bool "Use bridge-vlan backend"
help
Set this option to use bridge-vlan as backend for VLAN objects.
endmenu
endif

View File

@@ -5,14 +5,14 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=bridgemngr
PKG_VERSION:=1.0.5
PKG_VERSION:=1.0.2
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/bridgemngr
PKG_SOURCE_VERSION:=c0f2e17f6d4f96aecfe72ab90be885939413176d
PKG_SOURCE_VERSION:=9cddf87b527ef1614a8a39db67e6578ff1810031
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -33,18 +33,10 @@ define Package/bridgemngr/description
Package to add Device.Bridging. data model support.
endef
define Package/$(PKG_NAME)/config
source "$(SOURCE)/Config.in"
endef
MAKE_PATH:=src
TARGET_CFLAGS += -DBBF_VENDOR_PREFIX=\\\"$(CONFIG_BBF_VENDOR_PREFIX)\\\"
ifeq ($(CONFIG_BRIDGEMNGR_BRIDGE_VLAN),y)
TARGET_CFLAGS += -DBRIDGE_VLAN_BACKEND
endif
define Package/bridgemngr/install
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/src/libbridgemngr.so $(1) $(PKG_NAME)
ifeq ($(findstring iopsys,$(CONFIG_BBF_VENDOR_LIST)),iopsys)

View File

@@ -7,13 +7,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=bulkdata
PKG_VERSION:=2.1.11
PKG_VERSION:=2.1.10
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/bulkdata.git
PKG_SOURCE_VERSION:=5dd9cd3cfc95e9dce5f64fe9cadd274bb31b8fa6
PKG_SOURCE_VERSION:=e472e90feec31d9f318ea8c732ab564002e25db1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -12,21 +12,12 @@ start_service() {
config_load bulkdata
config_get_bool enable bulkdata enable 1
if [ ! -f "/var/state/bulkdatad" ]; then
touch /var/state/bulkdatad
uci -q -c /var/state set bulkdatad.global='global'
uci -q -c /var/state commit bulkdatad
fi
if [ "$enable" -eq "1" ]; then
[ "$enable" -eq "1" ] && {
procd_open_instance "bulkdata"
procd_set_param command "$PROG"
procd_set_param respawn
procd_close_instance "bulkdata"
else
uci -q -c /var/state set bulkdatad.global.status='Disabled'
uci -q -c /var/state commit bulkdatad
fi
}
}
reload_service() {

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ddnsmngr
PKG_VERSION:=1.0.7
PKG_VERSION:=1.0.5
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/ddnsmngr.git
PKG_SOURCE_VERSION:=4b0c679c4dc3e3725de5c0c55ed60f24b87c6edd
PKG_SOURCE_VERSION:=f3c818322747922035a9eafe5de05d148ce15b4b
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=decollector
PKG_VERSION:=6.0.1.1
PKG_VERSION:=6.0.0.9
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=64b16101bd349eeda573094109402b885543c94e
PKG_SOURCE_VERSION:=08cd179438b8085c19e7d7523c9b26adfcb93129
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/decollector.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip

View File

@@ -2,13 +2,13 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=dectmngr
PKG_RELEASE:=3
PKG_VERSION:=3.6.9
PKG_VERSION:=3.6.6
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/dectmngr.git
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=82d5bae7ef1d54b41029c9f87e8c821cde38a28e
PKG_SOURCE_VERSION:=85c173d1fac535726b2e750be8c282b74fb7dbca
PKG_MIRROR_HASH:=skip
endif

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=dhcpmngr
PKG_VERSION:=1.0.3
PKG_VERSION:=1.0.1
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/dhcpmngr.git
PKG_SOURCE_VERSION:=93f756f80a9391afd9b01f2608e031c4db3ca48b
PKG_SOURCE_VERSION:=d15d21766c1a1e5054b2391de1cfc4708be7a14c
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -28,9 +28,7 @@ define Package/dhcpmngr
SECTION:=net
CATEGORY:=Network
TITLE:=Package to add Device.DHCPv4 and v6 data model support.
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api +odhcpd
DEPENDS+=+DNSMNGR_DNS_SD:umdns
DEPENDS+=+DNSMNGR_BACKEND_DNSMASQ:dnsmasq
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api +dnsmasq
endef
define Package/dhcpmngr/description
@@ -46,10 +44,6 @@ endif
define Package/dhcpmngr/install
$(INSTALL_DIR) $(1)/etc/udhcpc.user.d
$(INSTALL_BIN) ./files/etc/udhcpc.user.d/udhcpc_lease_start_time.user $(1)/etc/udhcpc.user.d/udhcpc_lease_start_time.user
ifeq ($(CONFIG_DNSMNGR_BACKEND_UNBOUND),y)
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_DATA) ./files/etc/uci-defaults/unbound.odhcpd.uci_default $(1)/etc/uci-defaults/16-set-unbound-as-odhcpd-leasetrigger
endif
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/src/libdhcpmngr.so $(1) $(PKG_NAME)
endef

View File

@@ -1,18 +0,0 @@
#!/bin/sh
# update odhcpd uci to use unbound's script as leasetrigger
uci -q get dhcp.odhcpd >/dev/null 2>&1 && {
maindhcp="$(uci -q get dhcp.odhcpd.maindhcp)"
# if odhcpd is the main dhcp
[ "$maindhcp" = "1" ] || [ "$maindhcp" = "true" ] || [ "$maindhcp" = "on" ] && {
# if unbound daemon and unbound script file is present
[ -e /usr/lib/unbound/odhcpd.sh ] && [ -e /usr/sbin/unbound ] && {
# then set unbound script as leasetrigger in dhcp UCI
uci -q set dhcp.odhcpd.leasetrigger='/usr/lib/unbound/odhcpd.sh'
uci commit dhcp
}
}
}
exit 0

View File

@@ -1,30 +0,0 @@
if PACKAGE_dnsmngr
menu "Configuration"
config DNSMNGR_DNS_SD
bool "Include Device.DNS.SD. TR-181 support"
default y
help
Set this option to include support for TR-181 DNS.SD. object.
choice
prompt "Select backend for DNS management"
default DNSMNGR_BACKEND_DNSMASQ
depends on PACKAGE_dnsmngr
help
Select which backend daemon to use for DNS
config DNSMNGR_BACKEND_DNSMASQ
bool "Use dnsmasq for dns and dhcp"
help
Enable this option to use dnsmasq + odhcpd for dns and dhcp.
config DNSMNGR_BACKEND_UNBOUND
bool "Use unbound with odhcpd-full for dns and dhcp"
help
Enable this option to use unbound + odhcpd for dns and dhcp.
endchoice
endmenu
endif

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=dnsmngr
PKG_VERSION:=1.0.10
PKG_VERSION:=1.0.6
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/dnsmngr.git
PKG_SOURCE_VERSION:=dd7a285798b48e199f5e5d90d9c20cb5e0c14888
PKG_SOURCE_VERSION:=03d8d79c1221adb92b5789c03e2489d26c6ae184
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -27,47 +27,23 @@ MAKE_PATH:=src
define Package/dnsmngr
SECTION:=net
CATEGORY:=Network
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api +odhcpd
DEPENDS+=+DNSMNGR_DNS_SD:umdns
DEPENDS+=+DNSMNGR_BACKEND_DNSMASQ:dnsmasq
DEPENDS+=+DNSMNGR_BACKEND_UNBOUND:unbound-daemon +DNSMNGR_BACKEND_UNBOUND:unbound-control
TITLE:=Package to configure DNS backend and TR-181 support
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api +dnsmasq +umdns
TITLE:=Package to add Device.DNS. datamodel support
endef
define Package/dnsmngr/description
Package to configure DNS backend and TR-181 support.
Package to add Device.DNS. datamodel support.
endef
define Package/$(PKG_NAME)/config
source "$(SOURCE)/Config.in"
endef
ifeq ($(CONFIG_DNSMNGR_DNS_SD),y)
define Build/Compile
$(call Build/Compile/Default,all)
endef
else
define Build/Compile
$(call Build/Compile/Default,dns)
endef
endif
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ~/git/dnsmngr/* $(PKG_BUILD_DIR)/
endef
endif
ifeq ($(CONFIG_DNSMNGR_BACKEND_DNSMASQ),y)
TARGET_CFLAGS += -DDNSMASQ_BACKEND
endif
define Package/dnsmngr/install
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/src/libdnsmngr.so $(1) $(PKG_NAME)
$(BBFDM_INSTALL_SCRIPT) -d $(PKG_BUILD_DIR)/scripts/nslookup $(1)
ifeq ($(CONFIG_DNSMNGR_DNS_SD),y)
$(BBFDM_INSTALL_MS_PLUGIN) $(PKG_BUILD_DIR)/src/libdns_sd.so $(1) $(PKG_NAME)
endif
endef
$(eval $(call BuildPackage,dnsmngr))

View File

@@ -8,13 +8,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=dslmngr
PKG_VERSION:=1.2.6
PKG_VERSION:=1.2.4
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/hal/dslmngr.git
PKG_SOURCE_VERSION:=2b1ecbd2079dbd88ed6d58b277b91dcf5038d869
PKG_SOURCE_VERSION:=d71bef278b8222dee1c278723f8264aa8faf5e40
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MAINTAINER:=Rahul Thakur <rahul.thakur@iopsys.eu>
PKG_MIRROR_HASH:=skip

easy-qos/Makefile (new file)
View File

@@ -0,0 +1,47 @@
#
# Copyright (C) 2019 iopsys Software Solutions AB
#
# This is free software, licensed under the GNU General Public License v2.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=easy-qos
PKG_VERSION:=1.1
PKG_RELEASE:=0
PKG_LICENSE:=GPLv2
PKG_LICENSE_FILES:=none
include $(INCLUDE_DIR)/package.mk
define Package/easy-qos
SECTION:=net
CATEGORY:=Network
TITLE:=Easy QoS
DEPENDS:=@(TARGET_brcmbca||TARGET_airoha)
endef
define Package/easy-qos/description
This package contains Easy QoS utility
endef
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) ./files/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
endef
define Package/easy-qos/install
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(CP) ./files/etc/config/easy_qos $(1)/etc/config/
$(CP) ./files/etc/init.d/easy_qos.iptables $(1)/etc/init.d/easy_qos
$(CP) ./files/etc/uci-defaults/* $(1)/etc/uci-defaults/
$(CP) ./files/etc/firewall.easyqos $(1)/etc/firewall.easyqos
endef
$(eval $(call BuildPackage,easy-qos))

View File

View File

@@ -0,0 +1 @@
/etc/init.d/easy_qos reload

View File

@@ -0,0 +1,140 @@
#!/bin/sh /etc/rc.common
START=99
USE_PROCD=1
log() {
echo "${@}"|logger -t easy_qos.ebtable -p debug
}
exec_log() {
${@}
if [ "${?}" -ne 0 ]; then
log "Failed to create ${@}";
fi
}
get_priority() {
local prio=$(echo $1|tr 'A-Z' 'a-z');
case "${prio}" in
"lowest")
echo 0;;
"low")
echo 1;;
"besteffort")
echo 2;;
"normal")
echo 3;;
"video")
echo 4;;
"medium")
echo 5;;
"high")
echo 6;;
"highest")
echo 7;;
esac
}
validate_rule_section()
{
uci_validate_section easy_qos rule "${1}" \
'priority:string' \
'macaddr:string' \
'proto:string:none' \
'port:list(uinteger)' \
'comment:string:none'
}
# Clear existing rules before applying new rules
clear_existing_rules() {
local rule=$(ebtables --concurrent -t broute -L BROUTING|grep -m 1 mark)
while [ -n "${rule}" ]; do
exec_log ebtables --concurrent -t broute -D BROUTING ${rule}
rule=$(ebtables --concurrent -t broute -L BROUTING|grep -m 1 mark)
done
}
create_rule() {
local protocol=$1; shift
local mac=$1; shift
local mark="0x$1"; shift
local forward_port=$1;
local cmd="";
local protocol_number
cmd="-j mark --mark-or ${mark}";
if [ -n "${forward_port}" ]; then
cmd="--ip-destination-port ${forward_port} ${cmd}";
fi
case "${protocol}" in
"tcp")
protocol_number=6;;
"udp")
protocol_number=17;;
"dccp")
protocol_number=33;;
"sctp")
protocol_number=132;;
*)
log "Protocol ${protocol} not supported in ebtables"
return;;
esac
cmd="--ip-proto ${protocol_number} $cmd"
cmd="-p ip $cmd"
cmd="-s ${mac} $cmd"
exec_log ebtables --concurrent -t broute -A BROUTING ${cmd}
}
manage_rule() {
local cfg="$1"
local priority macaddr proto port comment prio_num protocol
validate_rule_section "${1}" || {
log "Validation of section failed"
return 1;
}
protocol=$(echo ${proto}|tr 'A-Z' 'a-z')
prio_num=$(get_priority ${priority})
if [ -n "${macaddr}" -a -n "${prio_num}" ]; then
for p in ${port}; do
if [ "${protocol}" == "none" -o "${protocol}" == "tcpudp" ]; then
create_rule tcp ${macaddr} ${prio_num} ${p}
create_rule udp ${macaddr} ${prio_num} ${p}
else
create_rule ${protocol} ${macaddr} ${prio_num} ${p}
fi
done
# Create rule for all ports if port is not mentioned in uci
if [ -z "${port}" ]; then
if [ "${protocol}" == "none" -o "${protocol}" == "tcpudp" ]; then
create_rule tcp ${macaddr} ${prio_num}
create_rule udp ${macaddr} ${prio_num}
else
create_rule ${protocol} ${macaddr} ${prio_num}
fi
fi
fi
}
reload_service() {
# Do not apply rules if ebtables is not present in system
[ -x /usr/sbin/ebtables ] || return;
clear_existing_rules
config_load easy_qos
config_foreach manage_rule rule
}
start_service() {
reload_service
}
service_triggers() {
procd_add_reload_trigger "easy_qos"
}
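As an illustration of the command the functions above assemble (MAC address and values are made up): a rule with priority 'high' (mark 0x6), proto 'tcp' and port 443 for client 00:11:22:33:44:55 results in
ebtables --concurrent -t broute -A BROUTING -s 00:11:22:33:44:55 -p ip --ip-proto 6 --ip-destination-port 443 -j mark --mark-or 0x6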

View File

@@ -0,0 +1,186 @@
#!/bin/sh /etc/rc.common
. /usr/share/libubox/jshn.sh
START=99
USE_PROCD=1
CLIENT_LIST="/tmp/easy_qos_client.list"
log() {
echo "${@}"|logger -t easy_qos -p debug
}
exec_log() {
${@}
if [ "${?}" -ne 0 ]; then
log "Failed to create ${@}";
fi
}
get_priority() {
local prio=$(echo $1|tr 'A-Z' 'a-z');
case "${prio}" in
"lowest")
echo 0;;
"low")
echo 1;;
"besteffort")
echo 2;;
"normal")
echo 3;;
"video")
echo 4;;
"medium")
echo 5;;
"high")
echo 6;;
"highest")
echo 7;;
esac
}
clean_client_entries() {
[ -f ${CLIENT_LIST} ] && rm ${CLIENT_LIST}
}
map_client_entries() {
local clients ip mac host
json_load "$(ubus call router.network 'clients')"
json_get_keys keys
for key in ${keys};
do
json_select ${key}
json_get_vars ipaddr macaddr hostname
clients="${macaddr} ${ipaddr} ${hostname};${clients}"
json_select ..
done
json_init
# json_add_array "clients"
IFS=";"
for client in ${clients};
do
macaddr=$(echo ${client} | cut -d" " -f1)
json_add_object "${macaddr//:/_}"
json_add_string "ip" "$(echo ${client} | cut -d" " -f2)"
json_add_string "macaddr" "$(echo ${client} | cut -d" " -f1)"
json_add_string "host" "$(echo ${client} | cut -d" " -f3)"
json_close_object
done
IFS=' '
echo `json_dump` > ${CLIENT_LIST}
json_cleanup
}
# Find the IP of a corresponding mac from arp table
get_ipaddress() {
local clients ip mac host
json_load "$(cat ${CLIENT_LIST})"
json_get_keys keys
# jshn seems a bit iffy on having : in key, replace by _
json_select "${1//:/_}" 2 > /dev/null
json_get_var ip ip
echo "$ip"
}
validate_rule_section()
{
uci_validate_section easy_qos rule "${1}" \
'priority:string' \
'macaddr:string' \
'proto:string:none' \
'port:list(uinteger)' \
'comment:string:none'
}
# Clear existing rules before applying new rules
clear_existing_rules() {
local rule=$(iptables -t mangle -S PREROUTING | grep -m 1 MARK |sed 's/-A/-D/1')
while [ -n "${rule}" ]; do
exec_log iptables -t mangle ${rule}
rule=$(iptables -t mangle -S PREROUTING | grep -m 1 MARK |sed 's/-A/-D/1')
done
}
check_and_create() {
iptables -t mangle -C PREROUTING ${@} 2>/dev/null
# Create rule if not exists
if [ ${?} -ne 0 ]; then
exec_log iptables -t mangle -A PREROUTING ${@}
else
log "Rule exists for ${@}"
fi
}
create_rule() {
local proto=$1; shift
local src_ip=$1; shift
local mark="0x$1/0x$1"; shift
local ports=$1;
local cmd="";
cmd="-j MARK --set-xmark ${mark}";
if [ -n "${ports}" ]; then
cmd="--match multiport --dports ${ports} ${cmd}";
fi
if [ "${proto}" == "icmp" ]; then
cmd="-p icmp -m icmp --icmp-type 8 $cmd"
elif [ "${proto}" == "all" ]; then
cmd="-p all $cmd"
else
cmd="-p ${proto} -m ${proto} $cmd"
fi
cmd="-s ${src_ip} $cmd"
check_and_create ${cmd}
}
manage_rule() {
local cfg="$1"
local priority macaddr proto port comment prio_num ip port_list
validate_rule_section "${1}" || {
log "Validation of section failed"
return 1;
}
prio_num=$(get_priority ${priority})
ip=$(get_ipaddress ${macaddr})
port_list=$(echo ${port}|sed 's/ /,/g')
if [ -n "${ip}" -a -n "${prio_num}" ]; then
if [ "${proto}" == "none" -o "${proto}" == "tcpudp" ]; then
create_rule tcp ${ip} ${prio_num} ${port_list}
create_rule udp ${ip} ${prio_num} ${port_list}
else
create_rule ${proto} ${ip} ${prio_num} ${port_list}
fi
fi
}
reload_service() {
clear_existing_rules
map_client_entries
config_load easy_qos
config_foreach manage_rule rule
clean_client_entries
}
start_service() {
reload_service
echo "Easy QoS installed">/dev/console;
}
service_triggers() {
procd_add_reload_trigger "easy_qos"
}
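For comparison, the iptables variant above ends up issuing commands of this shape (IP address, mark and ports are example values resolved from the client list):
iptables -t mangle -A PREROUTING -s 192.168.1.10 -p tcp -m tcp --match multiport --dports 80,443 -j MARK --set-xmark 0x6/0x6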

View File

@@ -0,0 +1,8 @@
# Add firewall include
uci -q batch <<-EOT
delete firewall.easyqos
set firewall.easyqos=include
set firewall.easyqos.path=/etc/firewall.easyqos
set firewall.easyqos.reload=1
commit firewall
EOT
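The batch above is equivalent to this section in /etc/config/firewall:
config include 'easyqos'
        option path '/etc/firewall.easyqos'
        option reload '1'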

View File

@@ -6,13 +6,13 @@ include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=ebtables-extensions
PKG_VERSION:=1.0.4
PKG_VERSION:=1.0.2
PKG_LICENSE:=GPL-2.0
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=35fb79f95c47d90e3791c7e126048b451f078f24
PKG_SOURCE_VERSION:=a23a70f5518a42d663156a156c1e3356f695b5ad
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/ebtables-extensions.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ethmngr
PKG_VERSION:=2.1.9
PKG_VERSION:=2.1.7
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/hal/ethmngr.git
PKG_SOURCE_VERSION:=2d35e86cc8dfd7ef4e0d8579f5d314e90faadc90
PKG_SOURCE_VERSION:=d029ce86fe99b7896f096f68eda3f6caa000ee5f
PKG_MAINTAINER:=Rahul Thakur <rahul.thakur@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=firewallmngr
PKG_VERSION:=1.0.5
PKG_VERSION:=1.0.1
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/firewallmngr.git
PKG_SOURCE_VERSION:=94246676dc2e2db29b94fcffec1be3cee3ec8e9f
PKG_SOURCE_VERSION:=f5c3e2c93a8a992ab24291eb2c67adf77de7f896
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -53,21 +53,15 @@ endif
define Package/firewallmngr/install
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_DIR) $(1)/etc/uci-defaults
ifeq ($(CONFIG_FIREWALLMNGR_PORT_TRIGGER),y)
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_DIR) $(1)/lib/port-trigger
$(INSTALL_BIN) ./files/port-trigger/etc/init.d/port-trigger $(1)/etc/init.d/
$(INSTALL_DATA) ./files/port-trigger/etc/config/port-trigger $(1)/etc/config/
$(INSTALL_DATA) ./files/port-trigger/lib/port-trigger/port_trigger.sh $(1)/lib/port-trigger/
endif
$(INSTALL_BIN) ./files/firewall.portmap $(1)/etc/
$(INSTALL_DATA) ./files/etc/uci-defaults/95-portmap-firewall $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/firewall.service $(1)/etc/
$(INSTALL_DATA) ./files/etc/uci-defaults/97-firewall-service $(1)/etc/uci-defaults/
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/src/libfirewallmngr.so $(1) $(PKG_NAME)
endef

View File

@@ -1,175 +0,0 @@
#
# Copyright (C) 2024 IOPSYS
#
include $(TOPDIR)/rules.mk
PKG_NAME:=fluent-bit
PKG_VERSION:=3.1.0
PKG_RELEASE:=$(AUTORELEASE)
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE:=$(PKG_NAME)-v$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/fluent/fluent-bit/archive/v$(PKG_VERSION)
PKG_HASH:=7a49e110cf3050b6c29c911063494b8081f3c743274d1d95e52562d0476ba1eb
endif
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
define Package/fluent-bit
CATEGORY:=Utilities
DEPENDS:= +libyaml +libopenssl +libcurl +libatomic +musl-fts +flex +bison
TITLE:=Fluent-Bit
URL:=https://fluentbit.io/
endef
define Package/fluent-bit/description
Fluent Bit is a super fast, lightweight, and highly scalable logging and metrics processor and forwarder.
endef
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ./fluent-bit/* $(PKG_BUILD_DIR)/
endef
endif
# General options
TARGET_LDFLAGS +=-lfts -latomic
CMAKE_OPTIONS+= \
-DFLB_RELEASE=Yes \
-DFLB_SMALL=No \
-DEXCLUDE_FROM_ALL=true \
-DFLB_SHARED_LIBS=Yes \
-DFLB_DEBUG=Yes \
-DFLB_ALL=No \
-DFLB_JEMALLOC=No \
-DFLB_EXAMPLES=No \
-DFLB_CHUNK_TRACE=No \
-DFLB_BACKTRACE=No \
-DFLB_WASM=No \
-DFLB_LUAJIT=No
# In plugins
CMAKE_OPTIONS += \
-DFLB_IN_SYSLOG=Yes \
-DFLB_IN_CPU=Yes \
-DFLB_IN_MEM=Yes \
-DFLB_IN_DISK=Yes \
-DFLB_IN_EXEC=Yes \
-DFLB_IN_HEAD=Yes \
-DFLB_IN_FORWARD=No \
-DFLB_IN_KMSG=No \
-DFLB_IN_PROC=No \
-DFLB_IN_RANDOM=No \
-DFLB_IN_SERIAL=No \
-DFLB_IN_MQTT=No \
-DFLB_IN_STDIN=No \
-DFLB_IN_SYSTEMD=No \
-DFLB_IN_TAIL=No \
-DFLB_IN_TCP=No \
-DFLB_IN_THERMAL=No \
-DFLB_IN_UDP=No \
-DFLB_IN_DOCKER=No \
-DFLB_IN_EXEC_WASI=No \
-DFLB_IN_EVENT_TYPE=No \
-DFLB_IN_FLUENTBIT_METRICS=No \
-DFLB_IN_KUBERNETES_EVENTS=No \
-DFLB_IN_KAFKA=No \
-DFLB_IN_LIB=No \
-DFLB_IN_SYSTEMD=No \
-DFLB_IN_DUMMY=No \
-DFLB_IN_NETIF=No \
-DFLB_IN_COLLECTD=No \
-DFLB_IN_PROMETHEUS_SCRAPE=No \
-DFLB_IN_STATSD=No \
-DFLB_IN_STORAGE_BACKLOG=No \
-DFLB_IN_PODMAN_METRICS=No \
-DFLB_IN_OPENTELEMETRY=No \
-DFLB_IN_ELASTICSEARCH=No \
-DFLB_IN_CALYPTIA_FLEET=No \
-DFLB_IN_SPLUNK=No
-DFLB_IN_HEALTH=No \
-DFLB_IN_WINLOG=No \
-DFLB_IN_WINEVTLOG=No
# Filter options
CMAKE_OPTIONS +=
-DFLB_FILTER_AWS=No \
-DFLB_FILTER_ECS=No \
-DFLB_FILTER_KUBERNETES=No \
-DFLB_FILTER_LUA=No \
-DFLB_FILTER_NEST=No \
-DFLB_FILTER_RECORD_MODIFIER=No \
-DFLB_FILTER_THROTTLE=No \
-DFLB_FILTER_TYPE_CONVERTER=No \
-DFLB_FILTER_WASM=No \
-DFLB_FILTER_TENSORFLOW=No \
-DFLB_FILTER_GEOIP2=No \
-DFLB_FILTER_NIGHTFALL=No
# out plugins
CMAKE_OPTIONS += \
-DFLB_OUT_EXIT=Yes \
-DFLB_OUT_FORWARD=Yes \
-DFLB_OUT_HTTP=Yes \
-DFLB_OUT_NATS=Yes \
-DFLB_OUT_TCP=Yes \
-DFLB_OUT_UDP=Yes \
-DFLB_OUT_FILE=Yes \
-DFLB_OUT_STDOUT=Yes \
-DFLB_OUT_SYSLOG=Yes \
-DFLB_OUT_NULL=Yes \
-DFLB_OUT_PLOT=No \
-DFLB_OUT_AZURE=No \
-DFLB_OUT_AZURE_BLOB=No \
-DFLB_OUT_AZURE_LOGS_INGESTION=No \
-DFLB_OUT_AZURE_KUSTO=No \
-DFLB_OUT_BIGQUERY=No \
-DFLB_OUT_CALYPTIA=No \
-DFLB_OUT_COUNTER=No \
-DFLB_OUT_DATADOG=No \
-DFLB_OUT_ES=No \
-DFLB_OUT_GELF=No \
-DFLB_OUT_INFLUXDB=No \
-DFLB_OUT_NRLOGS=No \
-DFLB_OUT_OPENSEARCH=No \
-DFLB_OUT_TD=No \
-DFLB_OUT_SKYWALKING=No \
-DFLB_OUT_SLACK=No \
-DFLB_OUT_SPLUNK=No \
-DFLB_OUT_STACKDRIVER=No \
-DFLB_OUT_LIB=No \
-DFLB_OUT_FLOWCOUNTER=No \
-DFLB_OUT_LOGDNA=No \
-DFLB_OUT_LOKI=No \
-DFLB_OUT_KAFKA=No \
-DFLB_OUT_KAFKA_REST=No \
-DFLB_OUT_CLOUDWATCH_LOGS=No \
-DFLB_OUT_KINESIS_FIREHOSE=No \
-DFLB_OUT_KINESIS_STREAMS=No \
-DFLB_OUT_OPENTELEMETRY=No \
-DFLB_OUT_PROMETHEUS_EXPORTER=No \
-DFLB_OUT_PROMETHEUS_REMOTE_WRITE=No \
-DFLB_OUT_S3=No \
-DFLB_OUT_VIVO_EXPORTER=No \
-DFLB_OUT_WEBSOCKET=No \
-DFLB_OUT_ORACLE_LOG_ANALYTICS=No \
-DFLB_OUT_CHRONICLE=No \
-DFLB_OUT_PGSQL=No
define Package/fluent-bit/install
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_DIR) $(1)/etc/fluent-bit
$(INSTALL_BIN) $(PKG_BUILD_DIR)/bin/fluent-bit $(1)/usr/sbin/
$(INSTALL_DATA) ./files/fluent-bit.conf $(1)/etc/fluent-bit/fluent-bit.conf
$(INSTALL_DATA) $(PKG_BUILD_DIR)/conf/parsers.conf $(1)/etc/fluent-bit/parsers.conf
endef
$(eval $(call BuildPackage,fluent-bit))

View File

@@ -1,15 +0,0 @@
[SERVICE]
flush 3
daemon Off
log_level info
parsers_file /etc/fluent-bit/parsers.conf
[INPUT]
name syslog
tag syslog
path /dev/log
[OUTPUT]
name null
match *

View File

@@ -1,14 +0,0 @@
diff --git a/plugins/out_file/file.c b/plugins/out_file/file.c
index 2e47c9666..42ace24c6 100644
--- a/plugins/out_file/file.c
+++ b/plugins/out_file/file.c
@@ -45,6 +45,9 @@
#define NEWLINE "\n"
#endif
+#undef PATH_MAX
+#define PATH_MAX 256
+
struct flb_file_conf {
const char *out_path;
const char *out_file;

View File

@@ -1,45 +0,0 @@
diff --git a/plugins/out_file/file.c b/plugins/out_file/file.c
index 2e47c9666..95d28e438 100644
--- a/plugins/out_file/file.c
+++ b/plugins/out_file/file.c
@@ -27,6 +27,7 @@
#include <msgpack.h>
#include <stdio.h>
+#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -55,6 +56,7 @@ struct flb_file_conf {
int csv_column_names;
int mkdir;
struct flb_output_instance *ins;
+ char hostname[256];
};
static char *check_delimiter(const char *str)
@@ -141,6 +143,9 @@ static int cb_file_init(struct flb_output_instance *ins,
}
}
+ if (gethostname(ctx->hostname, sizeof(ctx->hostname)) != 0)
+ snprintf(ctx->hostname, sizeof(ctx->hostname), "%s", "localhost");
+
tmp = flb_output_get_property("delimiter", ins);
ret_str = check_delimiter(tmp);
if (ret_str != NULL) {
@@ -233,12 +238,8 @@ static int template_output_write(struct flb_file_conf *ctx,
int i;
msgpack_object_kv *kv;
- /*
- * Right now we treat "{time}" specially and fill the placeholder
- * with the metadata timestamp (formatted as float).
- */
- if (!strncmp(key, "time", size)) {
- fprintf(fp, "%f", flb_time_to_double(tm));
+ if (!strncmp(key, "hostname", size)) {
+ fprintf(fp, "%s", ctx->hostname);
return 0;
}

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=hostmngr
PKG_VERSION:=1.2.6
PKG_VERSION:=1.2.5
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=aa365710b227ba82b1c43f9cdf497261edb21852
PKG_SOURCE_VERSION:=20402c9fb60afa0678d844b0401e1b89699ff8b3
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/hostmngr.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip

View File

@@ -1,6 +0,0 @@
menu "Configuration"
config ICWMP_MGMT_FROM_USP
bool "Support configuration of ManagementServer from USP"
default y
endmenu

View File

@@ -8,13 +8,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=icwmp
PKG_VERSION:=9.8.11
PKG_VERSION:=9.7.15
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/icwmp.git
PKG_SOURCE_VERSION:=d2007e860693a12fa23d275cca3e8e36df17c15d
PKG_SOURCE_VERSION:=fb3c8833f3fce7fe3659395cab48e7078f2007d5
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
@@ -32,21 +32,8 @@ define Package/icwmp
SUBMENU:=TRx69
TITLE:=TR069 CWMP client
DEPENDS:=+libuci +libubox +libblobmsg-json +libubus +libjson-c +libcurl +mxml +libuuid +libbbfdm-api +libopenssl
MENU:=1
endef
define Package/icwmp/description
TR069 client implementation with bbfdm backend for TR181 support
endef
define Package/icwmp/config
source "$(SOURCE)/Config.in"
endef
ifeq ($(CONFIG_ICWMP_MGMT_FROM_USP),y)
EXTRA_CFLAGS += -DCWMP_DUAL_SUPPORT=BBFDM_BOTH
endif
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ~/git/icwmp/* $(PKG_BUILD_DIR)/
@@ -64,7 +51,6 @@ define Package/icwmp/install
$(INSTALL_BIN) $(PKG_BUILD_DIR)/icwmpd $(1)/usr/sbin/icwmpd
$(INSTALL_DATA) ./files/etc/config/cwmp $(1)/etc/config/cwmp
$(INSTALL_BIN) ./files/etc/firewall.cwmp $(1)/etc/firewall.cwmp
$(INSTALL_BIN) ./files/etc/critical_services.json $(1)/etc/icwmpd/critical_services.json
$(INSTALL_BIN) ./files/etc/init.d/icwmpd $(1)/etc/init.d/icwmpd
$(INSTALL_BIN) ./files/etc/uci-defaults/85-cwmp-set-userid $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/90-cwmpfirewall $(1)/etc/uci-defaults/

View File

@@ -41,7 +41,6 @@ config cpe 'cpe'
option active_notif_throttle '0'
option disable_gatewayinfo '0'
option fw_upgrade_keep_settings '1'
option clock_sync_timeout '128'
config lwn 'lwn'
option enable '0'

View File

@@ -1,11 +0,0 @@
{
"services_list": [
"firewall",
"network",
"dhcp",
"stunc",
"xmpp",
"wireless",
"time"
]
}

View File

@@ -6,7 +6,7 @@ log() {
}
get_firewall_zone() {
zone="$(uci show firewall|grep network|grep -w ${1}|cut -d. -f 2)"
zone="$(uci show firewall|grep network|grep ${1}|cut -d. -f 2)"
zone="${zone:-wan}" # defaults to wan zone
echo "$zone"
}

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ieee1905
PKG_VERSION:=8.5.6
PKG_VERSION:=8.4.2
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=25925684c21de7494d4973b1799f5bd121014518
PKG_SOURCE_VERSION:=4d60d4a9b55940fffa39b7799abf2a7962ae2113
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/ieee1905.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip

View File

@@ -4,7 +4,7 @@ PKG_NAME:=iopsys-analytics
PKG_RELEASE:=$(COMMITCOUNT)
PKG_LICENSE:=PROPRIETARY
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=f448cfe9264b4079f616b065244c4be24b516aba
PKG_SOURCE_VERSION:=fb84c1019a8a0fbfb624d9df8eb3604806645510
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/iopsys-analytics.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip

View File

@@ -6,13 +6,13 @@ include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=ipt-trigger
PKG_VERSION:=1.0.2
PKG_VERSION:=1.0.1
PKG_LICENSE:=GPL-2.0
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=4f3d4427403e0a9be7653c1b92907ae8ae5f21ae
PKG_SOURCE_VERSION:=8d4b4520a2935a5717a27f486a3fc78357b2a0cd
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/ipt-trigger.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=libdpp
PKG_VERSION:=2.1.1
PKG_VERSION:=2.1.0
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=6024efd3db9dd490c07465ea9b0c15120063165c
PKG_SOURCE_VERSION:=1f82436531d4bb094b0b74e99613e0dfc84eada3
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/libdpp.git
PKG_MAINTAINER:=Jakob Olsson <jakob.olsson@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=libethernet
PKG_VERSION:=7.2.111
PKG_VERSION:=7.2.109
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=6e7216e657dfb59e869e393ef58e6b4593c16fc7
PKG_SOURCE_VERSION:=cc72f5ab0171cd0fc29bb48dafff6751ab2f0d9c
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/libethernet.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=libqos
PKG_VERSION:=7.2.108
PKG_VERSION:=7.2.107
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=6a72e35e1a662e2f707e4901679676a9c09b3bc2
PKG_SOURCE_VERSION:=de659f50c0ae1cd4ec64315b301c53595eaf39de
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/libqos.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip

View File

@@ -8,7 +8,7 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=libvoice-broadcom
PKG_RELEASE:=1
PKG_VERSION:=1.0.14
PKG_VERSION:=1.0.13
PKG_LICENSE:=PROPRIETARY
PKG_LICENSE_FILES:=LICENSE
@@ -17,7 +17,7 @@ LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/$(PKG_NAME).git
PKG_SOURCE_VERSION:=7fde62b9634c63b9bc71d1c20541798971a78dc8
PKG_SOURCE_VERSION:=f1509651217d027376b5b7fc3f64ca86662e9b2d
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -8,7 +8,7 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=libvoice-d2
PKG_RELEASE:=1
PKG_VERSION:=1.1.13
PKG_VERSION:=1.1.11
PKG_LICENSE:=PROPRIETARY
PKG_LICENSE_FILES:=LICENSE
@@ -17,7 +17,7 @@ LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/$(PKG_NAME).git
PKG_SOURCE_VERSION:=0b00d6e2772bf31e696fc974df071e6fcf972fa3
PKG_SOURCE_VERSION:=95fb29a31f7665abbe87af4a74cf52b7e5f22a29
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -5,12 +5,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=libwifi
PKG_VERSION:=7.5.4
PKG_VERSION:=7.4.69
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=008b5e169895647aac2bdf7029046a1267d429b6
PKG_SOURCE_VERSION:=8a5d8b5defb7cedc5876bb5fbee5c1ad185de889
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/libwifi.git
PKG_MAINTAINER:=Anjan Chanda <anjan.chanda@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
@@ -51,6 +51,10 @@ else ifeq ($(CONFIG_TARGET_ipq53xx),y)
TARGET_PLATFORM=IPQ53XX
TARGET_WIFI_TYPE=QUALCOMM MAC80211
TARGET_CFLAGS +=-DIPQ53XX
else ifeq ($(CONFIG_TARGET_mediatek),y)
TARGET_PLATFORM=LINUX
TARGET_WIFI_TYPE=MEDIATEK MAC80211
TARGET_CFLAGS +=-DIOPSYS_LINUX
else
$(info Unexpected CONFIG_TARGET, use default MAC80211)
TARGET_PLATFORM=MAC80211

View File

@@ -1,26 +0,0 @@
if PACKAGE_logmngr
choice
prompt "Select backend for syslog management"
default LOGMNGR_BACKEND_FLUENTBIT
depends on PACKAGE_logmngr
help
Select which backend daemon to use for syslog management
config LOGMNGR_BACKEND_FLUENTBIT
bool "Use fluent-bit for log management"
help
Enable this option to use fluent-bit for log management.
config LOGMNGR_BACKEND_SYSLOG_NG
bool "Use syslog-ng for log management"
help
Enable this option to use syslog-ng for log management.
endchoice
config LOGMNGR_LOGROTATE
bool "Logrotate support"
depends on PACKAGE_logmngr
default y
help
It adds support for logrotate functionality.
endif

View File

@@ -1,74 +0,0 @@
#
# Copyright (C) 2024 iopsys
#
include $(TOPDIR)/rules.mk
PKG_NAME:=logmngr
PKG_VERSION:=1.0.1
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/system/logmngr.git
PKG_SOURCE_VERSION:=ec10abb3cc0f3b96eb806c9c67e18d9d134287e9
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
PKG_LICENSE:=GPL-2.0-only
PKG_LICENSE_FILES:=LICENSE
include $(INCLUDE_DIR)/package.mk
include ../bbfdm/bbfdm.mk
MAKE_PATH:=bbf_plugin
define Package/logmngr
SECTION:=utils
CATEGORY:=Utilities
TITLE:=Logging Manager
DEPENDS:=+libbbfdm-api +LOGMNGR_BACKEND_FLUENTBIT:fluent-bit +LOGMNGR_LOGROTATE:logrotate
DEPENDS+=+LOGMNGR_BACKEND_SYSLOG_NG:syslog-ng
endef
define Package/logmngr/description
Configure log management. This package has the datamodel as well as the
the backend implementation for handling syslog.
endef
define Package/$(PKG_NAME)/config
source "$(SOURCE)/Config.in"
endef
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ./logmngr/* $(PKG_BUILD_DIR)/
endef
endif
define Package/logmngr/install
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/logmngr.init $(1)/etc/init.d/logmngr
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) ./files/logread $(1)/usr/sbin
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_BIN) ./files/10-logmngr_config_generate $(1)/etc/uci-defaults/
$(INSTALL_DIR) $(1)/lib/logmngr
ifeq ($(CONFIG_LOGMNGR_BACKEND_FLUENTBIT),y)
$(INSTALL_DATA) ./files/lib/logmngr/fluent-bit.sh $(1)/lib/logmngr/.
endif
ifeq ($(CONFIG_LOGMNGR_BACKEND_SYSLOG_NG),y)
$(INSTALL_DATA) ./files/lib/logmngr/syslog-ng.sh $(1)/lib/logmngr/.
endif
$(BBFDM_INSTALL_CORE_PLUGIN) $(PKG_BUILD_DIR)/bbf_plugin/libbbfsyslog.so $(1)
ifeq ($(CONFIG_LOGMNGR_LOGROTATE),y)
$(INSTALL_BIN) ./files/11-logmngr_logrotate_config_generate $(1)/etc/uci-defaults/
$(INSTALL_DATA) ./files/lib/logmngr/logrotate.sh $(1)/lib/logmngr/.
$(BBFDM_INSTALL_CORE_PLUGIN) $(PKG_BUILD_DIR)/bbf_plugin/libbbflogrotate.so $(1)
endif
endef
$(eval $(call BuildPackage,logmngr))

View File

@@ -1,23 +0,0 @@
#!/bin/sh
if [ -s "/etc/config/logmngr" ]; then
if uci -q get logmngr.@globals[0] >/dev/null; then
# return if there is any valid content
exit
else
rm -f /etc/config/logmngr
fi
fi
touch /etc/config/logmngr
uci set logmngr.globals=globals
uci set logmngr.globals.enable=1
uci set logmngr.a1=action
uci set logmngr.a1.name="ac1"
uci set logmngr.lf1=log_file
uci set logmngr.lf1.enable=1
uci set logmngr.lf1.action="ac1"
uci set logmngr.lf1.file="/var/log/messages"
uci commit logmngr

View File

@@ -1,14 +0,0 @@
#!/bin/sh
if [ -s "/etc/config/logmngr" ]; then
if uci -q get logmngr.@log_rotate[0] >/dev/null; then
# return if there is any valid content
exit
fi
uci set logmngr.lro1=log_rotate
uci set logmngr.lro1.enable=1
uci set logmngr.lro1.file_name="/var/log/messages"
uci set logmngr.lro1.file_count=1
uci set logmngr.lro1.max_file_size=1000000
uci commit logmngr
fi

View File

@@ -1,321 +0,0 @@
#!/bin/sh
. /lib/functions.sh
. /lib/logmngr/logrotate.sh
CONF_FILE=/etc/fluent-bit/fluent-bit.conf
TMP_CONF_FILE=/tmp/fluent-bit/fluent-bit.conf
create_config_file() {
mkdir -p /tmp/fluent-bit
rm -f ${TMP_CONF_FILE}
touch ${TMP_CONF_FILE}
}
create_service_section() {
# the service section of the fluent-bit.conf file has hardcoded values,
# no need to lookup any uci section to configure this section
echo "[SERVICE]" >> ${TMP_CONF_FILE}
echo " flush 3" >> ${TMP_CONF_FILE}
echo " daemon off" >> ${TMP_CONF_FILE}
echo " log_level info" >> ${TMP_CONF_FILE}
echo " parsers_file /etc/fluent-bit/parsers.conf" >> ${TMP_CONF_FILE}
}
create_input_section() {
local tag="$1"
# the input in our case is always syslog, hence, this section of the
# fluent-bit.conf file has hardcoded values as well that do not depend
# on any uci value
echo "[INPUT]" >> ${TMP_CONF_FILE}
echo " name syslog" >> ${TMP_CONF_FILE}
echo " tag $tag" >> ${TMP_CONF_FILE}
echo " path /dev/log" >> ${TMP_CONF_FILE}
}
generate_facility_regex() {
local facility_level=$1
local pri=0
if [ "$facility_level" == "24" ]; then
# value 24 means all facility level, which is as good as not
# generating a filter section, so return
return
fi
# facility_level is a list value, hence, generate regex for
# each value
IFS=" "
for val in $facility_level; do
# as per rfc 5424 and 3164, pri in syslog msg is
# facility*8+severity. Severity value can range from 0-7 hence
# generate regex for each.
for sval in 0 1 2 3 4 5 6 7; do
pri=`expr $val \* 8 + $sval`
echo " regex pri $pri" >> ${TMP_CONF_FILE}
done
done
}
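# Worked example (illustrative): facility_level "4" makes the loop above emit
#     regex pri 32  ...  regex pri 39
# since PRI = facility*8 + severity (RFC 3164/5424) and severity runs from 0 to 7.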
generate_severity_regex() {
local sev_level="$1"
local sev_compare="$2"
local sev_action="$3"
local pri=0
local param="exclude"
if [ "$sev_action" == "0" ]; then
param="regex"
fi
local fval=0
if [ "$sev_compare" == "0" ]; then
# generate regex for all facility values, with severity=sev_level
while [ $fval -le 23 ] ; do
pri=`expr $fval \* 8 + $sev_level`
echo " $param pri $pri" >> ${TMP_CONF_FILE}
fval=$((fval + 1))
done
elif [ "$sev_compare" == "1" ]; then
# generate regex for all severity value greater than or equal to
# sev_level. please, lower value have higher precedence, so sev_level
# 0 which is emergency has higher precedence than error which is 3
while [ $fval -le 23 ] ; do
sval=0
while [ $sev_level -ge $sval ]; do
pri=`expr $fval \* 8 + $sval`
echo " $param pri $pri" >> ${TMP_CONF_FILE}
sval=$((sval + 1))
done
fval=$((fval + 1))
done
fi
}
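# Worked example (illustrative): sev_level=3 (err), sev_compare=1, sev_action=0
# emits "regex pri P" for every P = facility*8 + s with facility 0..23 and s 0..3,
# i.e. messages at severity err or more urgent are kept; any other sev_action
# switches the keyword to "exclude" and drops them instead.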
handle_filter_conf() {
local section="$1" # config filter
local filter_name="$2"
local name
# no need to proceed if name of filter section is not one of the values
# listed in option filter in config action section
config_get name $section name
if [ "$name" != "$filter_name" ]; then
return
fi
# as per data model, at a time either facility_level or severity_level can
# be specified along with pattern_match. hence, first process and generate
# regex for pattern_match which is common in both condition. Next, we will
# process facility_level and return if facility level is defined and not
# process severity related params at all.
local pattern_match
config_get pattern_match $section pattern_match
if [ -n "$pattern_match" ]; then
echo " regex $pattern_match" >> ${TMP_CONF_FILE}
fi
local facility_level
config_get facility_level $section facility_level
if [ -n "$facility_level" ]; then
generate_facility_regex $facility_level
# return from here since if facility_level is defined, then no
# need to process severity_level
return
fi
local sev_level
local sev_compare
local sev_action
config_get sev_level $section severity_level
if [ -n "$sev_level" ]; then
# value 1 of severity compare corresponds to data model
# and system default which is EqualorHigher
config_get sev_compare $section severity_compare 1
# value 0 of severity action corresponds to data model
# and system default that is log
config_get sev_action $section severity_action 0
generate_severity_regex $sev_level $sev_compare $sev_action
fi
}
create_filter_section() {
local match="$1"
echo "[FILTER]" >> ${TMP_CONF_FILE}
echo " name grep" >> ${TMP_CONF_FILE}
echo " match $match" >> ${TMP_CONF_FILE}
echo " logical_op or" >> ${TMP_CONF_FILE} # handle multiple filters
}
handle_filter_ref() {
local filter_name="$1"
config_foreach handle_filter_conf filter "$filter_name"
}
handle_log_file() {
local section="$1" # out_file section
local match="$2"
local action_ref
config_get action_ref $section action
if [ "$action_ref" != "$match" ]; then
return
fi
local enabled
config_get enabled $section enable
if [ "$enabled" == 0 ]; then
return
fi
local file
config_get file $section file
if [ -z "$file" ]; then
return
fi
echo "[OUTPUT]" >> ${TMP_CONF_FILE}
echo " name file" >> ${TMP_CONF_FILE}
echo " match $match" >> ${TMP_CONF_FILE}
echo " file $file" >> ${TMP_CONF_FILE}
echo " format template" >> ${TMP_CONF_FILE}
echo " template {time} {hostname} {ident}: {message}" >> ${TMP_CONF_FILE}
}
handle_log_remote() {
local section="$1"
local match="$2"
local action_ref
config_get action_ref $section action
if [ "$action_ref" != "$match" ]; then
return
fi
local enabled
config_get enabled $section enable
if [ "$enabled" == 0 ]; then
return
fi
local address
config_get address $section log_ip
if [ -z "$address" ]; then
return
fi
echo "[OUTPUT]" >> ${TMP_CONF_FILE}
echo " name syslog" >> ${TMP_CONF_FILE}
echo " match $match" >> ${TMP_CONF_FILE}
echo " host $address" >> ${TMP_CONF_FILE}
local proto # holds value tcp or udp
config_get proto $section proto
if [ -n "$proto" ]; then
if [ "$proto" == "tls" ]; then
echo " mode tcp" >> ${TMP_CONF_FILE}
echo " tls on" >> ${TMP_CONF_FILE}
else
echo " mode $proto" >> ${TMP_CONF_FILE}
fi
fi
local port
config_get port $section port
if [ -n "$port" ]; then
echo " port $port" >> ${TMP_CONF_FILE}
fi
local cert
local peer_verify
config_get cert $section cert
if [ -n "$cert" ]; then
echo " tls.crt_file $cert" >> ${TMP_CONF_FILE}
config_get peer_verify $section peer_verify
if [ "$peer_verify" == "1" ]; then
echo " tls.verify on" >> ${TMP_CONF_FILE}
fi
fi
}
handle_action() {
local section="$1"
local filter
config_get filter $section filter
# use config action option name as tag for input
local tag
config_get tag $section name
if [ -z "$tag" ]; then
return
fi
create_input_section $tag
if [ -n "$filter" ]; then
# the only fluentbit filter that is useful for the datamodel is
# grep. Also, fluentbit does not seem to handle multiple instances
# of FILTER of same kind. Hence, each filter section corresponding
# to an action entry in the uci would translate for us into a set of
# regex/exclude values instead of individual FILTER section per uci
# section filter is a list, treat according
create_filter_section $tag
IFS=" "
for finst in $filter; do
handle_filter_ref $finst
done
fi
# handle output, each action can be associated with a out_log and out_syslog
# section so figure out if any out_log or out_syslog section is associated
# with this and action and setup output accordingly.
config_foreach handle_log_file log_file "$tag"
config_foreach handle_log_remote log_remote "$tag"
}
handle_action_section() {
config_foreach handle_action action
}
apply_config_file() {
cp ${TMP_CONF_FILE} ${CONF_FILE}
}
PROG=/usr/sbin/fluent-bit
logmngr_init() {
create_config_file
config_load logmngr
local enabled
config_get enabled globals enable
if [ "$enabled" == "0" ]; then
return
fi
create_service_section
handle_action_section
apply_config_file
if [ -f /lib/logmngr/logrotate.sh ]; then
logrotate_init
fi
procd_open_instance logmngr
procd_set_param command $PROG -c $CONF_FILE
procd_set_param file $CONF_FILE
procd_set_param respawn
procd_close_instance
}

View File

@@ -1,96 +0,0 @@
#!/bin/sh
. /lib/functions.sh
LOGROTATE_FILE=/etc/logrotate.conf
LOGROTATE_TMP_FILE=/tmp/logrotate/logrotate.conf
create_logrotate_file() {
mkdir -p /tmp/logrotate
rm -f ${LOGROTATE_TMP_FILE}
touch ${LOGROTATE_FILE}
}
handle_logrotate() {
local section="$1"
local enabled
config_get enabled $section enable
if [ "$enabled" == "0" ]; then
return
fi
local file_name
config_get file_name $section file_name
if [ -z "$file_name" ]; then
# no file to rotate, return
return
fi
echo -e "$file_name {" >> ${LOGROTATE_TMP_FILE}
echo -e "\tcreate" >> ${LOGROTATE_TMP_FILE}
echo -e "\tmissingok" >> ${LOGROTATE_TMP_FILE}
echo -e "\tnotifempty" >> ${LOGROTATE_TMP_FILE}
local file_count
config_get file_count $section file_count
if [ -n "$file_count" ]; then
echo -e "\trotate $file_count" >> ${LOGROTATE_TMP_FILE}
fi
local max_file_size
config_get max_file_size $section max_file_size
if [ -n "$max_file_size" ]; then
echo -e "\tmaxsize $max_file_size" >> ${LOGROTATE_TMP_FILE}
fi
local duration
config_get duration $section duration
if [ -n "$duration" ]; then
echo -e "\tminutes $duration" >> ${LOGROTATE_TMP_FILE}
fi
local retention
config_get retention $section retention
if [ -n "$retention" ]; then
echo -e "\tmaxage $retention" >> ${LOGROTATE_TMP_FILE}
fi
local compression
config_get compression $section compression
if [ -n "$compression" ]; then
echo -e "\tcompress" >> ${LOGROTATE_TMP_FILE}
echo -e "\tcompresscmd $compression" >> ${LOGROTATE_TMP_FILE}
fi
echo -e "\tpostrotate" >> ${LOGROTATE_TMP_FILE}
echo -e "\t\tservice logmngr restart" >> ${LOGROTATE_TMP_FILE}
echo -e "\t\tsleep 1" >> ${LOGROTATE_TMP_FILE}
echo -e "\tendscript" >> ${LOGROTATE_TMP_FILE}
echo -e "}" >> ${LOGROTATE_TMP_FILE} # close the logfile section
}
apply_logrotate_file() {
cp ${LOGROTATE_TMP_FILE} ${LOGROTATE_FILE}
}
config_cron_job() {
# taking the liberty to configure the cron job hourly, that is, at the end
# of each hour, check if logrotation is needed. The logrotate daemon, when
# triggered hourly, will still honour the configure log rotation duration,
# the only slight different being that if the minutes for log rotation
# are configured in such a way that it falls within the hour, then the
# log rotation will be done at the completion of hour and not before. I do
# not think this is a drawback in the interest of keeping things simple.
sed -i '/logrotate/d' /etc/crontabs/root
echo "0 * * * * logrotate ${LOGROTATE_FILE}" >> /etc/crontabs/root
/etc/init.d/cron restart
}
logrotate_init() {
create_logrotate_file
config_foreach handle_logrotate log_rotate
apply_logrotate_file
config_cron_job
}
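To make the generated output concrete: a log_rotate section with file_name '/var/log/messages', file_count '3', max_file_size '1M' and duration '60' would cause handle_logrotate above to append roughly the following stanza (only options that are actually set are emitted; the values here are illustrative):

/var/log/messages {
	create
	missingok
	notifempty
	rotate 3
	maxsize 1M
	minutes 60
	postrotate
		service logmngr restart
		sleep 1
	endscript
}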

View File

@@ -1,345 +0,0 @@
#!/bin/sh
. /lib/functions.sh
. /lib/logmngr/logrotate.sh
CONF_FILE=/etc/syslog-ng.conf
TMP_CONF_FILE=/tmp/syslog-ng/syslog-ng.conf
create_config_file() {
mkdir -p /tmp/syslog-ng
rm -f ${TMP_CONF_FILE}
touch ${TMP_CONF_FILE}
}
create_option_section() {
# the option section of the syslog-ng.conf file has hardcoded values,
# no need to look up any uci section to configure this section
echo -e "@version: 4.4" >> ${TMP_CONF_FILE}
echo -e '@include "scl.conf"' >> ${TMP_CONF_FILE}
echo -e "options {" >> ${TMP_CONF_FILE}
echo -e "\tchain_hostnames(no);" >> ${TMP_CONF_FILE}
echo -e "\tcreate_dirs(yes);" >> ${TMP_CONF_FILE}
echo -e "\tkeep_hostname(yes);" >> ${TMP_CONF_FILE}
echo -e "\tlog_fifo_size(256);" >> ${TMP_CONF_FILE}
echo -e "\tlog_msg_size(1024);" >> ${TMP_CONF_FILE}
echo -e "\tstats(freq(0));" >> ${TMP_CONF_FILE}
echo -e "\tflush_lines(0);" >> ${TMP_CONF_FILE}
echo -e "\tuse_fqdn(no);" >> ${TMP_CONF_FILE}
echo "};" >> ${TMP_CONF_FILE}
}
create_input_section() {
local tag="$1"
# the input in our case is always syslog, hence, this section of the
# syslog-ng.conf file has hardcoded values as well that do not depend
# on any uci value
echo -e "source $tag {" >> ${TMP_CONF_FILE}
echo -e "\tinternal();" >> ${TMP_CONF_FILE}
echo -e "\tunix-dgram("/dev/log");" >> ${TMP_CONF_FILE}
echo "};" >> ${TMP_CONF_FILE}
}
generate_facility_regex() {
local facility_level=$1
if [ "$facility_level" == "24" ]; then
# value 24 means all facility levels, which is as good as not
# generating a filter section, so return
return
fi
# facility_level is a list value, hence, generate regex for
# each value
IFS=" "
for val in $facility_level; do
echo -e "\tfacility($val);" >> ${TMP_CONF_FILE}
done
}
generate_severity_regex() {
local sev_level="$1"
local sev_compare="$2"
local sev_action="$3"
if [ "$sev_compare" == "0" ]; then
case $sev_level in
"0") echo -e "\tlevel(emerg)" >> ${TMP_CONF_FILE}
;;
"1") echo -e "\tlevel(alert)" >> ${TMP_CONF_FILE}
;;
"2") echo -e "\tlevel(crit)" >> ${TMP_CONF_FILE}
;;
"3") echo -e "\tlevel(err)" >> ${TMP_CONF_FILE}
;;
"4") echo -e "\tlevel(warning)" >> ${TMP_CONF_FILE}
;;
"5") echo -e "\tlevel(notice)" >> ${TMP_CONF_FILE}
;;
"6") echo -e "\tlevel(info)" >> ${TMP_CONF_FILE}
;;
"7") echo -e "\tlevel(debug)" >> ${TMP_CONF_FILE}
;;
esac
elif [ "$sev_compare" == "1" ]; then
# generate regex for all severity value greater than or equal to
# sev_level
case $sev_level in
"0") echo -e "\tlevel(emerg)" >> ${TMP_CONF_FILE}
;;
"1") echo -e "\tlevel(alert..emerg)" >> ${TMP_CONF_FILE}
;;
"2") echo -e "\tlevel(crit..emerg)" >> ${TMP_CONF_FILE}
;;
"3") echo -e "\tlevel(err..emerg)" >> ${TMP_CONF_FILE}
;;
"4") echo -e "\tlevel(warning..emerg)" >> ${TMP_CONF_FILE}
;;
"5") echo -e "\tlevel(notice..emerg)" >> ${TMP_CONF_FILE}
;;
"6") echo -e "\tlevel(info..emerg)" >> ${TMP_CONF_FILE}
;;
"7") echo -e "\tlevel(debug..emerg)" >> ${TMP_CONF_FILE}
;;
esac
fi
}
handle_filter_conf() {
local section="$1" # config filter
local filter_name="$2"
local name
# no need to proceed if the name of the filter section is not one of the
# values listed in the filter option of the action config section
config_get name $section name
if [ "$name" != "$filter_name" ]; then
return
fi
echo -e "filter $name {" >> ${TMP_CONF_FILE}
# as per the data model, either facility_level or severity_level can be
# specified at a time, along with pattern_match. Hence, first process and
# generate the regex for pattern_match, which is common to both conditions.
# Next, process facility_level and return if it is defined, without
# processing the severity related params at all.
local pattern_match
config_get pattern_match $section pattern_match
if [ -n "$pattern_match" ]; then
# the pattern here is a tag=>value pair, hence, break the pattern
# apart and fill in this field
local tag=$(echo $pattern_match | awk '{print $1}')
local value=$(echo $pattern_match | awk '{print $2}')
echo -e "\tmatch("$value" value("$tag"))" >> ${TMP_CONF_FILE}
fi
local facility_level
config_get facility_level $section facility_level
if [ -n "$facility_level" ]; then
generate_facility_regex $facility_level
# return from here, since if facility_level is defined there is no
# need to process severity_level
echo "};" >> ${TMP_CONF_FILE}
return
fi
local sev_level
local sev_compare
local sev_action
config_get sev_level $section severity_level
if [ -n "$sev_level" ]; then
# value 1 of severity compare corresponds to data model
# and system default which is EqualorHigher
config_get sev_compare $section severity_compare 1
# value 0 of severity action corresponds to data model
# and system default that is log
config_get sev_action $section severity_action 0
generate_severity_regex $sev_level $sev_compare $sev_action
fi
echo "};" >> ${TMP_CONF_FILE}
}
handle_filter_ref() {
local filter_name="$1"
config_foreach handle_filter_conf filter "$filter_name"
}
handle_log_file() {
local section="$1" # out_file section
local match="$2"
local filter="$3"
local action_ref
config_get action_ref $section action
if [ "$action_ref" != "$match" ]; then
return
fi
local enabled
config_get enabled $section enable
if [ "$enabled" == 0 ]; then
return
fi
local file
config_get file $section file
if [ -z "$file" ]; then
return
fi
echo -e "destination file_$match {" >> ${TMP_CONF_FILE}
echo -e "\tfile("$file");" >> ${TMP_CONF_FILE}
echo -e "};" >> ${TMP_CONF_FILE}
# now generate the log section for each action section to bring into effect
# the filter, destination and source sections created above
echo -e "log {" >> ${TMP_CONF_FILE}
echo -e "\tsource($tag);" >> ${TMP_CONF_FILE}
if [ -n "$filter" ]; then
IFS=" "
for finst in $filter; do
echo -e "\tfilter($finst);" >> ${TMP_CONF_FILE}
done
fi
echo -e "\tdestination(file_$match);" >> ${TMP_CONF_FILE} # log_file
echo -e "};" >> ${TMP_CONF_FILE} # close log section
}
handle_log_remote() {
local section="$1"
local match="$2"
local filter="$3"
local action_ref
config_get action_ref $section action
if [ "$action_ref" != "$match" ]; then
return
fi
local enabled
config_get enabled $section enable
if [ "$enabled" == 0 ]; then
return
fi
local address
config_get address $section log_ip
if [ -z "$address" ]; then
return
fi
echo -e "destination remote_$match {" >> ${TMP_CONF_FILE}
echo -e "\tsyslog(" >> ${TMP_CONF_FILE}
echo -e "\t\t"$address"" >> ${TMP_CONF_FILE}
local proto # holds value tcp or udp
config_get proto $section proto
if [ -n "$proto" ]; then
echo -e "\t\ttransport($proto)" >> ${TMP_CONF_FILE}
fi
local port
config_get port $section port
if [ -n "$port" ]; then
echo -e "\t\tport($port)" >> ${TMP_CONF_FILE}
fi
local cert
local peer_verify
config_get cert $section cert
if [ -n "$cert" ]; then
echo -e "\t\ttls(" >> ${TMP_CONF_FILE}
echo -e "\t\t\tcert-file($cert)" >> ${TMP_CONF_FILE}
config_get peer_verify $section peer_verify
if [ "$peer_verify" == "1" ]; then
echo -e "\t\t\tpeer-verify(required-trusted)" >> ${TMP_CONF_FILE}
fi
echo -e "\t\t)" >> ${TMP_CONF_FILE} # close tls section
fi
echo -e "\t);" >> ${TMP_CONF_FILE} # close syslog section
echo -e "};" >> ${TMP_CONF_FILE} # close destination section
# now generate the log section for each action section to bring into effect
# the filter, destination and source sections created above
echo -e "log {" >> ${TMP_CONF_FILE}
echo -e "\tsource($tag);" >> ${TMP_CONF_FILE}
if [ -n "$filter" ]; then
IFS=" "
for finst in $filter; do
echo -e "\tfilter($finst);" >> ${TMP_CONF_FILE}
done
fi
echo -e "\tdestination(remote_$match);" >> ${TMP_CONF_FILE} # log_file
echo -e "};" >> ${TMP_CONF_FILE} # close log section
}
handle_action() {
local section="$1"
local filter
config_get filter $section filter
# use config action option name as tag for input
local tag
config_get tag $section name
if [ -z "$tag" ]; then
return
fi
create_input_section $tag
if [ -n "$filter" ]; then
IFS=" "
for finst in $filter; do
handle_filter_ref $finst
done
fi
# handle output: each action can be associated with a log_file and a log_remote
# section, so figure out if any log_file or log_remote section is associated
# with this action and set up the output accordingly.
config_foreach handle_log_file log_file "$tag" "$filter"
config_foreach handle_log_remote log_remote "$tag" "$filter"
}
handle_action_section() {
config_foreach handle_action action
}
apply_config_file() {
cp ${TMP_CONF_FILE} ${CONF_FILE}
}
PROG=/usr/sbin/syslog-ng-ctl
logmngr_init() {
create_config_file
config_load logmngr
local enabled
config_get enabled globals enable
if [ "$enabled" == "0" ]; then
return
fi
create_option_section
handle_action_section
apply_config_file
if [ -f /lib/logmngr/logrotate.sh ]; then
logrotate_init
fi
procd_open_instance logmngr
procd_set_param command $PROG reload
procd_close_instance
}
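To illustrate the generated configuration: with an action named 'system_log' that references a filter 'kernel_only' (facility_level '0') and an enabled log_file pointing at '/var/log/messages', the functions above would append roughly the following to the temporary syslog-ng.conf (tabs shown as indentation; quoting follows what the echo statements actually emit):

source system_log {
	internal();
	unix-dgram(/dev/log);
};
filter kernel_only {
	facility(0);
};
destination file_system_log {
	file(/var/log/messages);
};
log {
	source(system_log);
	filter(kernel_only);
	destination(file_system_log);
};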

View File

@@ -1,16 +0,0 @@
#!/bin/sh /etc/rc.common
START=12
STOP=89
USE_PROCD=1
. /lib/functions.sh
include /lib/logmngr
start_service() {
logmngr_init
}
service_triggers() {
procd_add_reload_trigger logmngr
}
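Since the init script registers a procd reload trigger on the logmngr config, a typical workflow after changing options is (assuming the stock configuration ships a 'globals' section; shown only as a sketch):

uci set logmngr.globals.enable='1'
uci commit logmngr
/etc/init.d/logmngr reload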

View File

@@ -1,108 +0,0 @@
#!/bin/sh
# Shell script compatibility wrapper for /sbin/logread
#
# Copyright (C) 2019 Dirk Brenken <dev@brenken.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
. /lib/functions.sh
# use /var/log/messages as default
logfile="/var/log/messages"
handle_log_file() {
local section="$1"
local enabled
config_get enabled $section enable
if [ "$enabled" == 0 ]; then
return
fi
local file
config_get file $section file
if [ -z "$file" ]; then
return
fi
logfile="$file"
}
config_load logmngr
config_get logmngr_enabled globals enable
if [ "$logmngr_enabled" == "0" ]; then
printf "%s\n" "Error: logmngr is not enabled!"
exit 2
fi
# treat the last enabled log_file as logfile
config_foreach handle_log_file log_file
if [ ! -f "${logfile}" ]
then
printf "%s\n" "Error: logfile $logfile not found!"
exit 2
fi
usage()
{
printf "%s\n" "Usage: logread [options]"
printf "%s\n" "Options:"
printf "%5s %-10s%s\n" "-l" "<count>" "Got only the last 'count' messages"
printf "%5s %-10s%s\n" "-e" "<pattern>" "Filter messages with a regexp"
printf "%5s %-10s%s\n" "-f" "" "Follow log messages"
printf "%5s %-10s%s\n" "-h" "" "Print this help message"
}
if [ -z "${1}" ]
then
cat "${logfile}"
exit 0
else
while [ "${1}" ]
do
case "${1}" in
-l)
shift
count="${1//[^0-9]/}"
tail -n "${count:-50}" "${logfile}"
exit 0
;;
-e)
shift
pattern="${1}"
grep -E "${pattern}" "${logfile}"
exit 0
;;
-f)
tail -f "${logfile}"
exit 0
;;
-fe)
shift
pattern="${1}"
tail -f "${logfile}" | grep -E "${pattern}"
exit 0
;;
-h|*)
usage
exit 1
;;
esac
shift
done
fi
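A few usage examples for the wrapper above (assuming logmngr is enabled and an enabled log_file section points at a readable file):

logread               # dump the whole logfile
logread -l 100        # print the last 100 lines
logread -e "dnsmasq"  # filter the logfile with an extended regexp
logread -f            # follow new log messages
logread -fe "hostapd" # follow and filter at the same time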

View File

@@ -5,9 +5,9 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=map-agent
PKG_VERSION:=6.1.1.12
PKG_VERSION:=6.1.1.2
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_VERSION:=2f83ba1aa0655c07a2d3e40df034f4f81139a571
PKG_SOURCE_VERSION:=d0ff409ffe6bb03c97e14b34932f437f58f4e241
PKG_MAINTAINER:=Jakob Olsson <jakob.olsson@iopsys.eu>
PKG_LICENSE:=BSD-3-Clause

View File

@@ -199,16 +199,11 @@ map_genconf () {
else
uci set mapcontroller.controller.enabled="1"
[ "$disable_mlo" == "1" ] && {
mapcontroller_remove_mld() {
uci delete mapcontroller.$1
mapcontroller_disable_mld() {
uci set mapcontroller.$1.enabled='0'
}
mapcontroller_remove_mld_id() {
uci delete mapcontroller.$1.mld_id
}
config_load mapcontroller
config_foreach mapcontroller_remove_mld mld
config_foreach mapcontroller_remove_mld_id ap
config_foreach mapcontroller_disable_mld mld
}
fi
uci -q commit mapcontroller

View File

@@ -5,9 +5,9 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=map-controller
PKG_VERSION:=6.1.1.8
PKG_VERSION:=6.1.1.2
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_VERSION:=c947d0783032cc2eb6e627bf8d1d32c38196ab8b
PKG_SOURCE_VERSION:=480a5304591632729212408d77d220f707cb9d6b
PKG_MAINTAINER:=Jakob Olsson <jakob.olsson@iopsys.eu>
LOCAL_DEV=0

View File

@@ -27,9 +27,6 @@ config sta_steering
option report_rcpi_threshold_2g '80'
option report_rcpi_threshold_5g '96'
option report_rcpi_threshold_6g '96'
option steer_retry_int '30'
option steer_int '180'
option steer_disable_int '600'
###################
# Default AP sections credentials will by updated

View File

@@ -128,14 +128,14 @@ configure_mcpd_snooping() {
config_snooping_common_params $protocol $igmp_s_version $igmp_s_robustness $igmp_s_mode
config_mcast_querier_params $protocol $igmp_s_query_interval $igmp_s_q_resp_interval $igmp_s_last_mem_q_int
config_snooping_upstream_interface "$igmp_s_iface"
config_snooping_on_bridge $protocol "$igmp_s_iface" $igmp_s_mode
config_snooping_on_bridge $protocol $igmp_s_iface $igmp_s_mode
exceptions=$igmp_s_exceptions
fast_leave=$igmp_s_fast_leave
elif [ "$protocol" == "mld" ]; then
config_snooping_common_params $protocol $mld_s_version $mld_s_robustness $mld_s_mode
config_mcast_querier_params $protocol $mld_s_query_interval $mld_s_q_resp_interval $mld_s_last_mem_q_int
config_snooping_upstream_interface "$mld_s_iface"
config_snooping_on_bridge $protocol "$mld_s_iface" $mld_s_mode
config_snooping_on_bridge $protocol $mld_s_iface $mld_s_mode
exceptions=$mld_s_exceptions
fast_leave=$mld_s_fast_leave
fi
@@ -171,14 +171,14 @@ configure_mcpd_proxy() {
config_snooping_common_params $protocol $igmp_p_version $igmp_p_robustness $igmp_p_mode
config_mcast_querier_params $protocol $igmp_query_interval $igmp_q_resp_interval $igmp_last_mem_q_int
config_mcast_proxy_interface $protocol "$igmp_p_up_interfaces"
config_snooping_on_bridge $protocol "$igmp_p_down_interfaces" $igmp_p_mode
config_snooping_on_bridge $protocol $igmp_p_down_interfaces $igmp_p_mode
fast_leave=$igmp_fast_leave
exceptions=$igmp_p_exceptions
elif [ "$protocol" == "mld" ]; then
config_snooping_common_params $protocol $mld_p_version $mld_p_robustness $mld_p_mode
config_mcast_querier_params $protocol $mld_query_interval $mld_q_resp_interval $mld_last_mem_q_int
config_mcast_proxy_interface $protocol "$mld_p_up_interfaces"
config_snooping_on_bridge $protocol "$mld_p_down_interfaces" $mld_p_mode
config_snooping_on_bridge $protocol $mld_p_down_interfaces $mld_p_mode
fast_leave=$mld_fast_leave
exceptions=$mld_p_exceptions
fi

View File

@@ -1,24 +0,0 @@
#!/bin/sh
[ "$LINK" = "up" -a -n "$PORT" ] || exit 0
compare_mcast_snooping_interface() {
local interface dev running
config_get interface "$1" interface
for dev in $interface; do
if [ "$PORT" = "$dev" ]; then
running=$(ubus call service list '{"name": "mcast"}' | jsonfilter -e '@.mcast.instances')
if [ -z "${running}" ]; then
/etc/init.d/mcast start
else
ubus call uci commit '{"config":"mcast"}'
fi
exit
fi
done
}
config_load mcast
config_foreach compare_mcast_snooping_interface "snooping"

View File

@@ -1,22 +1,27 @@
#!/bin/sh
[ "$ACTION" = "ifup" -a -n "$INTERFACE" ] || exit 0
[ "$ACTION" = ifup ] || exit 0
. /lib/functions/network.sh
network_get_device l3device "$INTERFACE"
network_get_device l3device $INTERFACE
[ -n "$l3device" ] || exit 0
compare_mcast_proxy_upstream() {
local upstream dev running
local upstream
local mode="$2"
config_get upstream "$1" upstream_interface
if [ "$mode" == "proxy" ]; then
config_get upstream $1 upstream_interface
else
config_get upstream $1 interface
fi
for dev in $upstream; do
if [ "$l3device" = "$dev" ]; then
if [ "$l3device" == "$dev" ]; then
running=$(ubus call service list '{"name": "mcast"}' | jsonfilter -e '@.mcast.instances')
if [ -z "${running}" ]; then
if [ -z "${running}" ];then
/etc/init.d/mcast start
else
ubus call uci commit '{"config":"mcast"}'
@@ -27,4 +32,5 @@ compare_mcast_proxy_upstream() {
}
config_load mcast
config_foreach compare_mcast_proxy_upstream "proxy"
config_foreach compare_mcast_proxy_upstream "proxy" "proxy"
config_foreach compare_mcast_proxy_upstream "snooping" "snooping"

View File

@@ -85,8 +85,9 @@ config_mcproxy_interfaces() {
echo -e "pinstance main:$str_up ==>$str_down;\n" >> $CONFFILE
local filter=""
for excp in $exceptions; do
local filter=""
case $excp in
*/*)
ip_start="$(ipcalc.sh $excp | grep IP | awk '{print substr($0,4)}')"
@@ -97,16 +98,16 @@ config_mcproxy_interfaces() {
filter="$filter ($excp | *)"
;;
esac
done
for upstream in $str_up; do
echo "pinstance main upstream $upstream in blacklist table{$filter };" >> $CONFFILE
echo "pinstance main upstream $upstream out blacklist table{$filter };" >> $CONFFILE
done
for upstream in $str_up; do
echo "pinstance main upstream $upstream in blacklist table{$filter };" >> $CONFFILE
echo "pinstance main upstream $upstream out blacklist table{$filter };" >> $CONFFILE
done
for downstream in $str_down; do
echo "pinstance main downstream $downstream in blacklist table{$filter };" >> $CONFFILE
echo "pinstance main downstream $downstream out blacklist table{$filter };" >> $CONFFILE
for downstream in $str_down; do
echo "pinstance main downstream $downstream in blacklist table{$filter };" >> $CONFFILE
echo "pinstance main downstream $downstream out blacklist table{$filter };" >> $CONFFILE
done
done
}
@@ -261,6 +262,10 @@ config_mcproxy_instance() {
downstreams=$igmp_p_down_interfaces
mcast_mode=$igmp_p_mode
# mcproxy reserves two multicast subscriptions for igmp router service groups
local mg=$(cat /proc/sys/net/ipv4/igmp_max_memberships)
mg=$((mg+2))
echo $mg > /proc/sys/net/ipv4/igmp_max_memberships
elif [ "$protocol" == "mld" ]; then
case "$version" in
[1-2])

View File

@@ -1,48 +0,0 @@
#
# Copyright (C) 2020-2024 iopsys
#
include $(TOPDIR)/rules.mk
PKG_NAME:=netmngr
PKG_VERSION:=1.0.1
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/netmngr.git
PKG_SOURCE_VERSION:=cc1c8b3853fdab23a2a9a0001a94d7908edb6a41
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
include $(INCLUDE_DIR)/package.mk
include ../bbfdm/bbfdm.mk
define Package/netmngr
CATEGORY:=Utilities
TITLE:=Network Data Model Support
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libbbfdm-api
endef
define Package/netmngr/description
Package to add Network data model support.
endef
MAKE_PATH:=src
define Package/netmngr/install
$(BBFDM_INSTALL_MS_DM) -u Network $(PKG_BUILD_DIR)/src/libnetmngr.so $(1) $(PKG_NAME)
$(BBFDM_INSTALL_CORE_PLUGIN) $(PKG_BUILD_DIR)/src/libinterface_stack.so $(1)
endef
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) ~/git/netmngr/* $(PKG_BUILD_DIR)/
endef
endif
$(eval $(call BuildPackage,netmngr))

View File

@@ -1,109 +0,0 @@
# Creating Custom Netmodes in IOWRT
This guide provides developers with detailed instructions on how to create and manage custom network modes (netmodes) in IOWRT. The `netmode` script allows for flexible network configuration, and developers can define their own modes by structuring the necessary files and scripts within the `/etc/netmodes/` directory.
## Table of Contents
1. [Overview of Netmodes](#overview-of-netmodes)
2. [Directory Structure](#directory-structure)
3. [Creating a Custom Netmode](#creating-a-custom-netmode)
- [Step 1: Pre-Execution Scripts](#step-1-pre-execution-scripts)
- [Step 2: UCI Configuration Files](#step-2-uci-configuration-files)
- [Step 3: Custom Execution Scripts](#step-3-custom-execution-scripts)
- [Step 4: Post-Execution Scripts](#step-4-post-execution-scripts)
4. [Enabling and Switching Netmodes](#enabling-and-switching-netmodes)
## Overview of Netmodes
Netmodes in IOWRT provide a way to switch between different network configurations based on the needs of the environment. Developers can create custom netmodes by organizing scripts and configuration files in specific directories under `/etc/netmodes/<NETMODE_NAME>`.
## Directory Structure
A custom netmode is defined within the `/etc/netmodes/<NETMODE_NAME>` directory, which should contain the following subdirectories:
- **/lib/netmode/pre/**: Generic scripts executed before the netmode-specific configurations are applied.
- **/etc/netmodes/<NETMODE_NAME>/uci/**: Contains UCI configuration files that will be copied to `/etc/config/` during the application of the netmode.
- **/etc/netmodes/<NETMODE_NAME>/scripts/**: Custom scripts specific to the netmode that are executed after the UCI configurations are applied.
- **/lib/netmode/post/**: Generic scripts executed after the netmode-specific configurations are completed.
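For illustration, a hypothetical `bridge` netmode following this layout (using the example files from the steps below) could look like:

```
/lib/netmode/pre/cleanup.sh
/etc/netmodes/bridge/uci/network
/etc/netmodes/bridge/scripts/setup_bridge.sh
/lib/netmode/post/restart_services.sh
```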
## Creating a Custom Netmode
To create a new netmode, follow these steps:
### Step 1: Pre-Execution Scripts
Scripts located in `/lib/netmode/pre/` are executed before any mode-specific actions. These are typically used for preparing the system or cleaning up configurations from the previous netmode.
- **Create Pre-Execution Scripts**:
- Place your generic pre-execution scripts in `/lib/netmode/pre/`.
- Example script (`/lib/netmode/pre/cleanup.sh`):
```bash
#!/bin/sh
echo "Cleaning up old network configurations..."
# Add commands here
```
### Step 2: UCI Configuration Files
The UCI configuration files stored in `/etc/netmodes/<NETMODE_NAME>/uci/` will be copied to `/etc/config/`, effectively applying the desired network configuration.
- **Place UCI Config Files**:
- Create UCI configuration files under `/etc/netmodes/<NETMODE_NAME>/uci/`.
- Example (`/etc/netmodes/bridge/uci/network`):
```bash
config device 'br_lan'
option name 'br-lan'
option type 'bridge'
option multicast_to_unicast '0'
option bridge_empty '1'
list ports 'eth1'
list ports 'eth3'
list ports 'eth4'
config interface 'lan'
option proto 'dhcp'
option device 'br-lan'
option force_link '1'
option reqopts '43 125'
```
### Step 3: Custom Execution Scripts
After the UCI files are applied, any scripts in `/etc/netmodes/<NETMODE_NAME>/scripts/` are executed. These can be used to perform additional configuration tasks that are specific to the netmode.
- **Create Custom Scripts**:
- Add scripts to `/etc/netmodes/<NETMODE_NAME>/scripts/`.
- Example (`/etc/netmodes/bridge/scripts/setup_bridge.sh`):
```bash
#!/bin/sh
echo "Setting up bridge mode..."
# Additional configuration commands here
```
### Step 4: Post-Execution Scripts
Finally, the generic scripts in `/lib/netmode/post/` are executed. These scripts typically finalize the setup or perform any necessary cleanups.
- **Create Post-Execution Scripts**:
- Place scripts in `/lib/netmode/post/`.
- Example script (`/lib/netmode/post/restart_services.sh`):
```bash
#!/bin/sh
echo "Restarting network services..."
# Add commands here
```
## Enabling and Switching Netmodes
The netmode mechanism can be enabled or disabled via the UCI configuration, and you can switch between netmodes using UCI commands.
- **Enable Netmode**:
```bash
uci set netmode.global.enabled=1
uci commit netmode
```
- **Switch Netmode**:
```bash
uci set netmode.global.mode='<NETMODE_NAME>'
uci commit netmode
```

View File

@@ -22,11 +22,6 @@ config OBUSPA_CONTROLLER_MTP_VERIFY
config OBUSPA_ENABLE_TEST_CONTROLLER
bool "Adds a test controller by default"
default n
select OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL
config OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL
bool "Adds a test controller by default (local access only)"
default n
config OBUSPA_MAX_CONTROLLERS_NUM
int "The maximum number of controllers to be supported"

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=obuspa
PKG_VERSION:=8.0.8.6
PKG_VERSION:=8.0.1.6
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/obuspa.git
PKG_SOURCE_VERSION:=0eaf2364f13c71dad053a90eacba77830f724ca3
PKG_SOURCE_VERSION:=0997eebe269d766eb738b80e4d5ccd40baf79090
PKG_MAINTAINER:=Vivek Dutta <vivek.dutta@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
@@ -32,9 +32,7 @@ define Package/obuspa
SUBMENU:=TRx69
TITLE:=USP agent
MENU:=1
DEPENDS:=+libopenssl +libuci +libblobmsg-json +libcurl +libsqlite3 +libubox +libubus +libmosquitto-ssl +libwebsockets-openssl +ca-certificates \
+OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL:mosquitto-ssl +OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL:mosquitto-client-ssl \
+OBUSPA_ENABLE_TEST_CONTROLLER:mosquitto-auth-shadow +libbbfdm-api +libjson-c
DEPENDS:=+libopenssl +libuci +libblobmsg-json +libcurl +libsqlite3 +libubox +libubus +libmosquitto-ssl +libwebsockets-openssl
endef
define Package/obuspa/description
@@ -127,11 +125,8 @@ define Package/obuspa/install
$(INSTALL_BIN) ./files/etc/uci-defaults/obuspa-set-dhcp-option $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/udhcpc.user.d/udhcpc_obuspa_opt125.user $(1)/etc/udhcpc.user.d/udhcpc_obuspa_opt125.user
$(INSTALL_BIN) ./files/obuspa.hotplug $(1)/etc/hotplug.d/iface/21-obuspa
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/libuspagentdm.so $(1) $(PKG_NAME)
$(BBFDM_INSTALL_CORE_PLUGIN) ./files/etc/bbfdm/json/USPAgent.json $(1)
ifeq ($(CONFIG_OBUSPA_ENABLE_TEST_CONTROLLER),y)
$(INSTALL_BIN) ./files/etc/uci-defaults/54-test-usp-remote $(1)/etc/uci-defaults/
endif
ifeq ($(CONFIG_OBUSPA_ENABLE_TEST_CONTROLLER_LOCAL),y)
$(INSTALL_BIN) ./files/etc/init.d/usptest $(1)/etc/init.d/
$(INSTALL_BIN) ./files/etc/uci-defaults/55-test-usp-controller $(1)/etc/uci-defaults/
endif

File diff suppressed because it is too large

View File

@@ -352,9 +352,9 @@ configure_controller()
db_set "${BASEPATH}.PeriodicNotifInterval" "${PeriodicNotifInterval}"
fi
if [ -n "${SessionMode}" ]; then
db_set "${BASEPATH}.E2ESession.SessionMode" "${SessionMode}"
fi
#if [ -n "${SessionMode}" ]; then
# db_set "${BASEPATH}.E2ESession.SessionMode" "${SessionMode}"
#fi
if [ -n "${assigned_role_name}" ]; then
AssignedRole=$(get_role_index "${assigned_role_name}")

View File

@@ -2,7 +2,6 @@
"dmcaching_exclude": [
"Device.Hosts.Host.",
"Device.IEEE1905.",
"Device.WiFi.DataElements.",
"Device.NAT.InterfaceSetting."
"Device.WiFi.DataElements."
]
}

View File

@@ -1,20 +0,0 @@
#!/bin/sh
. /lib/functions.sh
if [ ! -f "/etc/config/mosquitto" ]; then
echo "Local mosquitto broker not available"
return 0
fi
add_usp_test()
{
uci_add mosquitto listener usptest
uci_set mosquitto usptest enabled 1
uci_set mosquitto usptest port '9001'
uci_set mosquitto usptest protocol 'websockets'
uci_set mosquitto usptest auth_plugin '/usr/lib/mosquitto_auth_shadow.so'
}
# Install test MQTT over WS listener
add_usp_test

View File

@@ -40,6 +40,16 @@ add_obuspa_test_controller()
uci_set obuspa testcontroller assigned_role_name 'full_access'
}
add_usp_test()
{
uci_add mosquitto listener usptest
uci_set mosquitto usptest enabled 1
uci_set mosquitto usptest port '9001'
uci_set mosquitto usptest protocol 'websockets'
uci_set mosquitto usptest require_certificates '0'
uci_set mosquitto usptest auth_plugin '/usr/lib/mosquitto_auth_shadow.so'
}
add_obuspa_config()
{
uci_add mosquitto listener obuspa
@@ -50,8 +60,11 @@ add_obuspa_config()
}
# Install test usp controller config
uci_load mosquitto
add_usp_test
add_obuspa_config
uci_load obuspa
add_obuspa_test_mtp
add_obuspa_test_mqtt
add_obuspa_test_controller

View File

@@ -1,19 +1,23 @@
diff --git a/src/core/device.h b/src/core/device.h
index 5e367b7..db154a5 100644
--- a/src/core/device.h
+++ b/src/core/device.h
@@ -336,6 +336,10 @@ void DEVICE_CONTROLLER_SetInheritedRole(
int DEVICE_CONTROLLER_CountEnabledWebsockClientConnections(void);
#endif
@@ -330,6 +330,10 @@ int DEVICE_MTP_ValidateMqttReference(dm_req_t *req, char *value);
void DEVICE_CONTROLLER_SetRolesFromMqtt(int mqtt_instance, int role_instance);
char *DEVICE_CONTROLLER_GetControllerTopic(int mqtt_instance);
+#ifdef OBUSPA_CONTROLLER_MTP_VERIFY
+bool DEVICE_CONTROLLER_IsMTPAllowed(char *endpoint_id, mtp_conn_t *mpc);
+bool DEVICE_CONTROLLER_IsMTPAllowed(char *endpoint_id, mtp_reply_to_t *mrt);
+#endif
+
#ifndef REMOVE_USP_BROKER
int DEVICE_SUBSCRIPTION_RouteNotification(Usp__Msg *usp, int instance);
bool DEVICE_SUBSCRIPTION_MarkVendorLayerSubs(int broker_instance, subs_notify_t notify_type, char *path, int group_id);
diff --git a/src/core/device_controller.c b/src/core/device_controller.c
index 97ca11d..19c91f1 100644
--- a/src/core/device_controller.c
+++ b/src/core/device_controller.c
@@ -967,6 +967,78 @@ int DEVICE_CONTROLLER_QueueBinaryMessage
@@ -952,6 +952,78 @@ int DEVICE_CONTROLLER_QueueBinaryMessage(mtp_send_item_t *msi, char *endpoint_id
return USP_ERR_OK;
}
@@ -26,12 +30,12 @@
+** This function is used by ValidateUspRecord() to determine whether to process a received USP message
+**
+** \param endpoint_id - Endpoint ID of controller that sent a USP message
+** \param mpc - pointer to structure specifying on which MTP the message was received
+** \param mrt - pointer to structure specifying on which MTP the message was received
+**
+** \return true if the MTP is allowed, false otherwise
+**
+**************************************************************************/
+bool DEVICE_CONTROLLER_IsMTPAllowed(char *endpoint_id, mtp_conn_t *mpc)
+bool DEVICE_CONTROLLER_IsMTPAllowed(char *endpoint_id, mtp_reply_to_t *mrt)
+{
+ controller_t *cont = FindEnabledControllerByEndpointId(endpoint_id);
+ controller_mtp_t *mtp;
@@ -42,18 +46,18 @@
+ return false;
+ }
+
+ mtp = FindFirstEnabledMtp(cont, mpc->protocol);
+ mtp = FindFirstEnabledMtp(cont, mrt->protocol);
+
+#ifdef ENABLE_WEBSOCKETS
+ // Allow websocket server if no other MTP is configured
+ if ((mpc->protocol == kMtpProtocol_WebSockets) && (mpc->ws.serv_conn_id != INVALID))
+ if ((mrt->protocol == kMtpProtocol_WebSockets) && (mrt->wsserv_conn_id != INVALID))
+ {
+ return mtp == NULL;
+ }
+#endif
+
+ // Disallow if there is no MTP configured with matching protocol
+ if ((mtp == NULL) || (mtp->protocol != mpc->protocol))
+ if ((mtp == NULL) || (mtp->protocol != mrt->protocol))
+ {
+ return false;
+ }
@@ -63,7 +67,7 @@
+ {
+#ifndef DISABLE_STOMP
+ case kMtpProtocol_STOMP:
+ return mtp->stomp_connection_instance == mpc->stomp.instance;
+ return mtp->stomp_connection_instance == mrt->stomp_instance;
+#endif
+
+#ifdef ENABLE_COAP
@@ -73,12 +77,12 @@
+
+#ifdef ENABLE_MQTT
+ case kMtpProtocol_MQTT:
+ return mtp->mqtt_connection_instance == mpc->mqtt.instance;
+ return mtp->mqtt_connection_instance == mrt->mqtt_instance;
+#endif
+
+#ifdef ENABLE_WEBSOCKETS
+ case kMtpProtocol_WebSockets:
+ return (mpc->ws.client_cont_instance == cont->instance) && (mpc->ws.client_mtp_instance == mtp->instance);
+ return (mrt->wsclient_cont_instance == cont->instance) && (mrt->wsclient_mtp_instance == mtp->instance);
+#endif
+ default:
+ TERMINATE_BAD_CASE(mtp->protocol);
@@ -92,15 +96,17 @@
/*********************************************************************//**
**
** DEVICE_CONTROLLER_IsMTPConfigured
diff --git a/src/core/msg_handler.c b/src/core/msg_handler.c
index 2a04d39..0b3074b 100644
--- a/src/core/msg_handler.c
+++ b/src/core/msg_handler.c
@@ -1210,6 +1210,15 @@ int ValidateUspRecord(UspRecord__Record
@@ -1206,6 +1206,15 @@ int ValidateUspRecord(UspRecord__Record *rec, mtp_conn_t *mtpc)
usp_service_instance = USP_BROKER_GetUspServiceInstance(rec->from_id, 0);
#endif
+#ifdef OBUSPA_CONTROLLER_MTP_VERIFY
+ // Exit if the controller is not allowed to use the MTP on which the message was received
+ if (DEVICE_CONTROLLER_IsMTPAllowed(rec->from_id, mtpc) == false)
+ if (DEVICE_CONTROLLER_IsMTPAllowed(rec->from_id, mrt) == false)
+ {
+ USP_ERR_SetMessage("%s: Ignoring message from endpoint_id=%s (unauthorized MTP)", __FUNCTION__, rec->from_id);
+ return USP_ERR_PERMISSION_DENIED;

View File

@@ -1,6 +1,8 @@
diff --git a/src/core/cli_server.c b/src/core/cli_server.c
index 701cbd9..103361e 100644
--- a/src/core/cli_server.c
+++ b/src/core/cli_server.c
@@ -785,10 +785,6 @@ int ExecuteCli_Get(char *arg1, char *arg
@@ -733,10 +733,6 @@ int ExecuteCli_Get(char *arg1, char *arg2, char *usage)
USP_ASSERT(gge->value != NULL);
SendCliResponse("%s => %s\n", gge->path, gge->value);
}
@@ -11,3 +13,37 @@
}
GROUP_GET_VECTOR_Destroy(&ggv);
diff --git a/src/core/handle_get.c b/src/core/handle_get.c
index e1055e0..16b9ceb 100755
--- a/src/core/handle_get.c
+++ b/src/core/handle_get.c
@@ -260,26 +260,16 @@ void FormPathExprResponse(int get_expr_index, char *path_expr, get_expr_info_t *
return;
}
- // If there was an error in getting any of the parameters associated with the path expression,
- // then just add the first error, without any of the parameter values, for this path expression result
- for (i=0; i < gi->num_entries; i++)
- {
- gge = &ggv->vector[gi->index + i];
- if (gge->err_code != USP_ERR_OK)
- {
- (void)AddGetResp_ReqPathRes(resp, path_expr, gge->err_code, gge->err_msg);
- return;
- }
- }
-
// If the code gets here, then the value of all parameters were retrieved successfully, so add their values to the result_params
req_path_result = AddGetResp_ReqPathRes(resp, path_expr, USP_ERR_OK, "");
for (i=0; i < gi->num_entries; i++)
{
gge = &ggv->vector[gi->index + i];
- // Simple format contains a resolved_path_result for every object (and sub object)
- AddResolvedPathResult(req_path_result, gge->path, gge->value);
+ if (gge->err_code == USP_ERR_OK) {
+ // Simple format contains a resolved_path_result for every object (and sub object)
+ AddResolvedPathResult(req_path_result, gge->path, gge->value);
+ }
}
}

View File

@@ -1,6 +1,6 @@
--- a/src/core/data_model.c
+++ b/src/core/data_model.c
@@ -1316,7 +1316,7 @@ int DATA_MODEL_NotifyInstanceAdded(char
@@ -1243,7 +1243,7 @@ int DATA_MODEL_NotifyInstanceAdded(char
// Exit if instance already exists - nothing to do
if (exists)
{
@@ -9,7 +9,7 @@
return USP_ERR_CREATION_FAILURE;
}
@@ -1404,7 +1404,7 @@ int DATA_MODEL_NotifyInstanceDeleted(cha
@@ -1328,7 +1328,7 @@ int DATA_MODEL_NotifyInstanceDeleted(cha
// Exit if instance does not exist - nothing to do
if (exists == false)
{

View File

@@ -1,14 +1,14 @@
--- a/src/core/data_model.c
+++ b/src/core/data_model.c
@@ -160,6 +160,7 @@ int SetVendorParam(dm_node_t *node, char
@@ -137,6 +137,7 @@ int GetVendorParam(dm_node_t *node, char
int SetVendorParam(dm_node_t *node, char *path, dm_instances_t *inst, char *value, dm_req_t *req);
double_link_t *FindLinkToFirstObject(double_linked_list_t *list);
int OverrideNodeType(dm_node_t *node, dm_node_type_t type, char *schema_path, dm_instances_t *inst);
+extern bool is_running_cli_local_command;
/*********************************************************************//**
**
** DATA_MODEL_Init
@@ -267,7 +268,9 @@ int DATA_MODEL_Init(void)
@@ -224,7 +225,9 @@ int DATA_MODEL_Init(void)
}
// Set the default values of OUI, Serial Number and (LocalAgent) EndpointID, and cache EndpointID

View File

@@ -1,6 +1,8 @@
diff --git a/src/core/bdc_exec.c b/src/core/bdc_exec.c
index 6b5c11d..3670361 100644
--- a/src/core/bdc_exec.c
+++ b/src/core/bdc_exec.c
@@ -549,9 +549,14 @@ int StartSendingReport(bdc_connection_t
@@ -548,9 +548,14 @@ int StartSendingReport(bdc_connection_t *bc)
// Set the list of headers
bc->headers = NULL;
bc->headers = curl_slist_append(bc->headers, "Content-Type: application/json; charset=UTF-8");
@@ -16,6 +18,8 @@
bc->headers = curl_slist_append(bc->headers, "Content-Encoding: gzip");
}
diff --git a/src/core/bdc_exec.h b/src/core/bdc_exec.h
index c58c6d5..ff37a2d 100644
--- a/src/core/bdc_exec.h
+++ b/src/core/bdc_exec.h
@@ -53,6 +53,6 @@ void BDC_EXEC_ScheduleExit(void);
@@ -26,9 +30,11 @@
+#define BDC_FLAG_HEADER_OBJ_HIER 0x00000008 // If set, report format in header would be ObjectHierarchy otherwise NameValuePair
#endif
diff --git a/src/core/device_bulkdata.c b/src/core/device_bulkdata.c
index 5b1aff2..a7d1b3e 100755
--- a/src/core/device_bulkdata.c
+++ b/src/core/device_bulkdata.c
@@ -71,7 +71,8 @@
@@ -68,7 +68,8 @@
//------------------------------------------------------------------------------
// Definitions for formats that we support
#define BULKDATA_ENCODING_TYPE "JSON"
@@ -38,7 +44,7 @@
// Definitions for Device.BulkData.Profile.{i}.JSONEncoding.ReportTimestamp
@@ -162,6 +163,7 @@ typedef struct
@@ -159,6 +160,7 @@ typedef struct
char compression[9];
char method[9];
bool use_date_header;
@@ -46,7 +52,7 @@
} profile_ctrl_params_t;
//------------------------------------------------------------------------------
@@ -236,7 +238,7 @@ bulkdata_profile_t *bulkdata_find_free_p
@@ -233,7 +235,7 @@ bulkdata_profile_t *bulkdata_find_free_profile(void);
bulkdata_profile_t *bulkdata_find_profile(int profile_id);
int bulkdata_calc_report_map(bulkdata_profile_t *bp, kv_vector_t *report_map);
int bulkdata_reduce_to_alt_name(char *spec, char *path, char *alt_name, char *out_buf, int buf_len);
@@ -55,7 +61,7 @@
unsigned char *bulkdata_compress_report(profile_ctrl_params_t *ctrl, char *input_buf, int input_len, int *p_output_len);
int bulkdata_schedule_sending_http_report(profile_ctrl_params_t *ctrl, bulkdata_profile_t *bp, unsigned char *json_report, int report_len);
int bulkdata_start_profile(bulkdata_profile_t *bp);
@@ -310,7 +312,7 @@ int DEVICE_BULKDATA_Init(void)
@@ -307,7 +309,7 @@ int DEVICE_BULKDATA_Init(void)
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.Parameter.{i}.Reference", "", Validate_BulkDataReference, NULL, DM_STRING);
// Device.BulkData.Profile.{i}.JSONEncoding
@@ -64,7 +70,7 @@
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.JSONEncoding.ReportTimestamp", BULKDATA_JSON_TIMESTAMP_FORMAT_EPOCH, Validate_BulkDataReportTimestamp, NULL, DM_STRING);
// Device.BulkData.Profile.{i}.HTTP
@@ -698,9 +700,11 @@ int Validate_BulkDataReference(dm_req_t
@@ -661,9 +663,11 @@ int Validate_BulkDataReference(dm_req_t *req, char *value)
int Validate_BulkDataReportFormat(dm_req_t *req, char *value)
{
// Exit if trying to set a value outside of the range we accept
@@ -78,7 +84,7 @@
return USP_ERR_INVALID_VALUE;
}
@@ -2008,6 +2012,14 @@ int bulkdata_platform_get_profile_contro
@@ -1974,6 +1978,14 @@ int bulkdata_platform_get_profile_control_params(bulkdata_profile_t *bp, profile
return err;
}
@@ -93,7 +99,7 @@
return USP_ERR_OK;
}
@@ -2283,7 +2295,7 @@ void bulkdata_process_profile_http(bulkd
@@ -2249,7 +2261,7 @@ void bulkdata_process_profile_http(bulkdata_profile_t *bp)
}
// Exit if unable to generate the report
@@ -102,7 +108,7 @@
if (json_report == NULL)
{
USP_ERR_SetMessage("%s: bulkdata_generate_json_report failed", __FUNCTION__);
@@ -2333,7 +2345,8 @@ void bulkdata_process_profile_usp_event(
@@ -2299,7 +2311,8 @@ void bulkdata_process_profile_usp_event(bulkdata_profile_t *bp)
kv_pair_t kv;
report_t *cur_report;
char *json_report;
@@ -112,7 +118,7 @@
// Exit if the MTP has not been connected to successfully after bootup
// This is to prevent BDC events being enqueued before the Boot! event is sent (the Boot! event is only sent after successfully connecting to the MTP).
@@ -2350,6 +2363,14 @@ void bulkdata_process_profile_usp_event(
@@ -2316,6 +2329,14 @@ void bulkdata_process_profile_usp_event(bulkdata_profile_t *bp)
return;
}
@@ -127,7 +133,7 @@
// When sending via USP events, only one report is ever sent in each USP event
// So ensure all retained reports are removed. NOTE: Clearing the reports here is only necessary when switching protocol from HTTP to USP event, and where HTTP had some unsent reports
bulkdata_clear_retained_reports(bp);
@@ -2367,7 +2388,7 @@ void bulkdata_process_profile_usp_event(
@@ -2333,7 +2354,7 @@ void bulkdata_process_profile_usp_event(bulkdata_profile_t *bp)
bp->num_retained_reports = 1;
// Exit if unable to generate the report
@@ -136,7 +142,7 @@
if (json_report == NULL)
{
USP_ERR_SetMessage("%s: bulkdata_generate_json_report failed", __FUNCTION__);
@@ -2579,21 +2600,7 @@ int bulkdata_reduce_to_alt_name(char *sp
@@ -2545,21 +2566,7 @@ int bulkdata_reduce_to_alt_name(char *spec, char *path, char *alt_name, char *ou
return USP_ERR_OK;
}
@@ -159,7 +165,7 @@
{
JsonNode *top; // top of report
JsonNode *array; // array of reports (retained + current)
@@ -2608,7 +2615,6 @@ char *bulkdata_generate_json_report(bulk
@@ -2574,7 +2581,6 @@ char *bulkdata_generate_json_report(bulkdata_profile_t *bp, char *report_timesta
long long value_as_ll;
unsigned long long value_as_ull;
bool value_as_bool;
@@ -167,7 +173,7 @@
int i, j;
char buf[32];
kv_pair_t *kv;
@@ -2631,7 +2637,7 @@ char *bulkdata_generate_json_report(bulk
@@ -2597,7 +2603,7 @@ char *bulkdata_generate_json_report(bulkdata_profile_t *bp, char *report_timesta
}
else if (strcmp(report_timestamp, "ISO-8601")==0)
{
@@ -176,16 +182,16 @@
if (result != NULL)
{
json_append_member(element, "CollectionTime", json_mkstring(buf));
@@ -2690,11 +2696,174 @@ char *bulkdata_generate_json_report(bulk
@@ -2656,11 +2662,174 @@ char *bulkdata_generate_json_report(bulkdata_profile_t *bp, char *report_timesta
json_append_member(top, "Report", array);
// Serialize the JSON tree
- result = json_stringify(top, " ");
+ char *output = json_stringify(top, " ");
// Clean up the JSON tree
json_delete(top); // Other JsonNodes which are children of this top level tree will be deleted
+
+ // Clean up the JSON tree
+ json_delete(top); // Other JsonNodes which are children of this top level tree will be deleted
+
+ return output;
+}
+
@@ -318,10 +324,10 @@
+
+ // Serialize the JSON tree
+ char *output = json_stringify(top, " ");
+
+ // Clean up the JSON tree
+ json_delete(top); // Other JsonNodes which are children of this top level tree will be deleted
+
// Clean up the JSON tree
json_delete(top); // Other JsonNodes which are children of this top level tree will be deleted
+ return output;
+}
+
@@ -352,7 +358,7 @@
return result;
}
@@ -2851,6 +3020,11 @@ int bulkdata_schedule_sending_http_repor
@@ -2817,6 +2986,11 @@ int bulkdata_schedule_sending_http_report(profile_ctrl_params_t *ctrl, bulkdata_
flags |= BDC_FLAG_DATE_HEADER;
}

View File

@@ -1,6 +1,6 @@
--- a/src/core/bdc_exec.c
+++ b/src/core/bdc_exec.c
@@ -548,11 +548,19 @@ int StartSendingReport(bdc_connection_t
@@ -547,11 +547,19 @@
// Set the list of headers
bc->headers = NULL;
@@ -25,7 +25,7 @@
{
--- a/src/core/bdc_exec.h
+++ b/src/core/bdc_exec.h
@@ -53,6 +53,9 @@ void BDC_EXEC_ScheduleExit(void);
@@ -53,6 +53,9 @@
#define BDC_FLAG_PUT 0x00000001 // If set, HTTP PUT should be used instead of HTTP POST when sending the report to the BDC server
#define BDC_FLAG_GZIP 0x00000002 // If set, the reports contants are Gzipped
#define BDC_FLAG_DATE_HEADER 0x00000004 // If set, the date header should be included in the HTTP post.
@@ -38,7 +38,7 @@
#endif
--- a/src/core/device_bulkdata.c
+++ b/src/core/device_bulkdata.c
@@ -70,9 +70,12 @@
@@ -67,9 +67,12 @@
//------------------------------------------------------------------------------
// Definitions for formats that we support
@@ -52,7 +52,7 @@
// Definitions for Device.BulkData.Profile.{i}.JSONEncoding.ReportTimestamp
@@ -156,6 +159,7 @@ static char *profile_push_event_args[] =
@@ -153,6 +156,7 @@
typedef struct
{
int num_retained_failed_reports;
@@ -60,7 +60,7 @@
char report_timestamp[33];
char url[1025];
char username[257];
@@ -164,6 +168,11 @@ typedef struct
@@ -161,6 +165,11 @@
char method[9];
bool use_date_header;
char report_format[20];
@@ -72,7 +72,7 @@
} profile_ctrl_params_t;
//------------------------------------------------------------------------------
@@ -211,6 +220,7 @@ int Validate_BulkDataEncodingType(dm_req
@@ -208,6 +217,7 @@
int Validate_BulkDataReportingInterval(dm_req_t *req, char *value);
int Validate_BulkDataReference(dm_req_t *req, char *value);
int Validate_BulkDataReportFormat(dm_req_t *req, char *value);
@@ -80,7 +80,7 @@
int Validate_BulkDataReportTimestamp(dm_req_t *req, char *value);
int Validate_BulkDataCompression(dm_req_t *req, char *value);
int Validate_BulkDataHTTPMethod(dm_req_t *req, char *value);
@@ -239,6 +249,8 @@ bulkdata_profile_t *bulkdata_find_profil
@@ -236,6 +246,8 @@
int bulkdata_calc_report_map(bulkdata_profile_t *bp, kv_vector_t *report_map);
int bulkdata_reduce_to_alt_name(char *spec, char *path, char *alt_name, char *out_buf, int buf_len);
char *bulkdata_generate_json_report(bulkdata_profile_t *bp, char *report_timestamp, char *report_format);
@@ -89,7 +89,7 @@
unsigned char *bulkdata_compress_report(profile_ctrl_params_t *ctrl, char *input_buf, int input_len, int *p_output_len);
int bulkdata_schedule_sending_http_report(profile_ctrl_params_t *ctrl, bulkdata_profile_t *bp, unsigned char *json_report, int report_len);
int bulkdata_start_profile(bulkdata_profile_t *bp);
@@ -253,6 +265,8 @@ char *bulkdata_platform_calc_uri_query_s
@@ -250,6 +262,8 @@
int bulkdata_platform_get_param_refs(int profile_id, param_ref_vector_t *param_refs);
void bulkdata_expand_param_ref(param_ref_entry_t *pr, group_get_vector_t *ggv);
void bulkdata_append_to_result_map(param_ref_entry_t *pr, group_get_vector_t *ggv, kv_vector_t *report_map);
@@ -98,7 +98,7 @@
/*********************************************************************//**
**
@@ -285,7 +299,7 @@ int DEVICE_BULKDATA_Init(void)
@@ -282,7 +296,7 @@
err |= USP_REGISTER_VendorParam_ReadOnly("Device.BulkData.Status", Get_BulkDataGlobalStatus, DM_STRING);
err |= USP_REGISTER_Param_Constant("Device.BulkData.MinReportingInterval", BULKDATA_MINIMUM_REPORTING_INTERVAL_STR, DM_UINT);
err |= USP_REGISTER_Param_SupportedList("Device.BulkData.Protocols", bdc_protocols, NUM_ELEM(bdc_protocols));
@@ -107,7 +107,7 @@
err |= USP_REGISTER_Param_Constant("Device.BulkData.ParameterWildCardSupported", "true", DM_BOOL);
err |= USP_REGISTER_Param_Constant("Device.BulkData.MaxNumberOfProfiles", BULKDATA_MAX_PROFILES_STR, DM_INT);
err |= USP_REGISTER_Param_Constant("Device.BulkData.MaxNumberOfParameterReferences", "-1", DM_INT);
@@ -300,7 +314,7 @@ int DEVICE_BULKDATA_Init(void)
@@ -297,7 +311,7 @@
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.Name", "", NULL, NULL, DM_STRING);
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.NumberOfRetainedFailedReports", "0", Validate_NumberOfRetainedFailedReports, NULL, DM_INT);
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.Protocol", BULKDATA_PROTOCOL_HTTP, Validate_BulkDataProtocol, NULL, DM_STRING);
@@ -116,7 +116,7 @@
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.ReportingInterval", "86400", Validate_BulkDataReportingInterval, NotifyChange_BulkDataReportingInterval, DM_UINT);
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.TimeReference", UNKNOWN_TIME_STR, NULL, NotifyChange_BulkDataTimeReference, DM_DATETIME);
@@ -315,6 +329,13 @@ int DEVICE_BULKDATA_Init(void)
@@ -312,6 +326,13 @@
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.JSONEncoding.ReportFormat", BULKDATA_JSON_REPORT_FORMAT_NAME_VALUE, Validate_BulkDataReportFormat, NULL, DM_STRING);
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.JSONEncoding.ReportTimestamp", BULKDATA_JSON_TIMESTAMP_FORMAT_EPOCH, Validate_BulkDataReportTimestamp, NULL, DM_STRING);
@@ -130,7 +130,7 @@
// Device.BulkData.Profile.{i}.HTTP
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.HTTP.URL", "", NULL, NotifyChange_BulkDataURL, DM_STRING);
err |= USP_REGISTER_DBParam_ReadWrite("Device.BulkData.Profile.{i}.HTTP.Username", "", NULL, NULL, DM_STRING);
@@ -594,9 +615,10 @@ int Validate_BulkDataProtocol(dm_req_t *
@@ -591,9 +612,10 @@
int Validate_BulkDataEncodingType(dm_req_t *req, char *value)
{
// Exit if trying to set a value outside of the range we accept
@@ -143,7 +143,7 @@
return USP_ERR_INVALID_VALUE;
}
@@ -713,6 +735,32 @@ int Validate_BulkDataReportFormat(dm_req
@@ -676,6 +698,32 @@
/*********************************************************************//**
**
@@ -176,7 +176,7 @@
** Validate_BulkDataReportTimestamp
**
** Validates Device.BulkData.Profile.{i}.JSONEncoding.ReportTimestamp
@@ -2004,6 +2052,14 @@ int bulkdata_platform_get_profile_contro
@@ -1970,6 +2018,14 @@
return err;
}
@@ -191,7 +191,7 @@
// Exit if unable to get ReportTimestamp
USP_SNPRINTF(path, sizeof(path), "Device.BulkData.Profile.%d.JSONEncoding.ReportTimestamp", bp->profile_id);
err = DATA_MODEL_GetParameterValue(path, ctrl_params->report_timestamp, sizeof(ctrl_params->report_timestamp), 0);
@@ -2020,6 +2076,46 @@ int bulkdata_platform_get_profile_contro
@@ -1986,6 +2042,46 @@
return err;
}
@@ -238,7 +238,7 @@
return USP_ERR_OK;
}
@@ -2256,7 +2352,7 @@ void bulkdata_process_profile_http(bulkd
@@ -2222,7 +2318,7 @@
{
int err;
report_t *cur_report;
@@ -247,7 +247,7 @@
profile_ctrl_params_t ctrl;
unsigned char *compressed_report;
int compressed_len;
@@ -2295,10 +2391,23 @@ void bulkdata_process_profile_http(bulkd
@@ -2261,10 +2357,23 @@
}
// Exit if unable to generate the report
@@ -275,7 +275,7 @@
return;
}
@@ -2307,14 +2416,14 @@ void bulkdata_process_profile_http(bulkd
@@ -2273,14 +2382,14 @@
USP_LOG_Info("BULK DATA: using compression method=%s", ctrl.compression);
if (enable_protocol_trace)
{
@@ -294,7 +294,7 @@
}
// NOTE: From this point on, only the compressed_report exists
@@ -2344,9 +2453,15 @@ void bulkdata_process_profile_usp_event(
@@ -2310,9 +2419,15 @@
kv_vector_t event_args;
kv_pair_t kv;
report_t *cur_report;
@@ -311,7 +311,7 @@
// Exit if the MTP has not been connected to successfully after bootup
// This is to prevent BDC events being enqueued before the Boot! event is sent (the Boot! event is only sent after successfully connecting to the MTP).
@@ -2355,20 +2470,62 @@ void bulkdata_process_profile_usp_event(
@@ -2321,20 +2436,62 @@
goto exit;
}
@@ -385,7 +385,7 @@
}
// When sending via USP events, only one report is ever sent in each USP event
@@ -2388,10 +2545,16 @@ void bulkdata_process_profile_usp_event(
@@ -2354,10 +2511,16 @@
bp->num_retained_reports = 1;
// Exit if unable to generate the report
@@ -405,7 +405,7 @@
return;
}
@@ -2399,15 +2562,15 @@ void bulkdata_process_profile_usp_event(
@@ -2365,15 +2528,15 @@
// Construct event_args manually to avoid the overhead of a malloc and copy of the report in KV_VECTOR_Add()
kv.key = "Data";
@@ -424,7 +424,7 @@
// From the point of view of this code, the report(s) have been successfully sent, so don't retain them
// NOTE: Sending of the reports successfully is delegated to the USP notification retry mechanism
@@ -2869,6 +3032,319 @@ char *bulkdata_generate_json_report(bulk
@@ -2835,6 +2998,319 @@
/*********************************************************************//**
**
@@ -744,7 +744,7 @@
** bulkdata_compress_report
**
** Compresses the report to send
@@ -3020,9 +3496,18 @@ int bulkdata_schedule_sending_http_repor
@@ -2986,9 +3462,18 @@
flags |= BDC_FLAG_DATE_HEADER;
}

View File

@@ -0,0 +1,12 @@
diff --git a/src/protobuf-c/protobuf-c.c b/src/protobuf-c/protobuf-c.c
index 3dc5473..0a6bde7 100644
--- a/src/protobuf-c/protobuf-c.c
+++ b/src/protobuf-c/protobuf-c.c
@@ -1926,6 +1926,7 @@ repeated_field_pack_to_buffer(const ProtobufCFieldDescriptor *field,
buffer->append(buffer, rv, scratch);
tmp = pack_buffer_packed_payload(field, count, array, buffer);
assert(tmp == payload_len);
+ (void)tmp; // Keep cmake production build happy
return rv + payload_len;
} else {
size_t siz;

View File

@@ -1,6 +1,8 @@
diff --git a/src/core/usp_err.c b/src/core/usp_err.c
index 1626e58..6db1d42 100755
--- a/src/core/usp_err.c
+++ b/src/core/usp_err.c
@@ -189,7 +189,9 @@ char *USP_ERR_ToString(int err, char *bu
@@ -189,7 +189,9 @@ char *USP_ERR_ToString(int err, char *buf, int len)
{
#if HAVE_STRERROR_R && !STRERROR_R_CHAR_P
// XSI version of strerror_r

View File

@@ -1,6 +1,8 @@
diff --git a/src/core/mqtt.c b/src/core/mqtt.c
index 04a1a9c..8cb2ad7 100644
--- a/src/core/mqtt.c
+++ b/src/core/mqtt.c
@@ -254,6 +254,8 @@ void QueueUspRecord_MQTT(mqtt_client_t *
@@ -234,6 +234,8 @@ void HandleMqttDisconnect(mqtt_client_t *client);
#define DEFINE_MQTT_TrustCertVerifyCallbackIndex(index) \
int MQTT_TrustCertVerifyCallback_##index (int preverify_ok, X509_STORE_CTX *x509_ctx) \
{\
@@ -9,7 +11,7 @@
return DEVICE_SECURITY_TrustCertVerifyCallbackWithCertChain(preverify_ok, x509_ctx, &mqtt_clients[index].cert_chain);\
}
@@ -264,6 +266,11 @@ DEFINE_MQTT_TrustCertVerifyCallbackIndex
@@ -244,6 +246,11 @@ DEFINE_MQTT_TrustCertVerifyCallbackIndex(1);
DEFINE_MQTT_TrustCertVerifyCallbackIndex(2);
DEFINE_MQTT_TrustCertVerifyCallbackIndex(3);
DEFINE_MQTT_TrustCertVerifyCallbackIndex(4);
@@ -21,7 +23,7 @@
// Add more, with incrementing indexes here, if you change MAX_MQTT_CLIENTS
//------------------------------------------------------------------------------------
@@ -274,10 +281,15 @@ ssl_verify_callback_t* mqtt_verify_callb
@@ -254,10 +261,15 @@ ssl_verify_callback_t* mqtt_verify_callbacks[] = {
MQTT_TrustCertVerifyCallbackIndex(2),
MQTT_TrustCertVerifyCallbackIndex(3),
MQTT_TrustCertVerifyCallbackIndex(4),

View File

@@ -0,0 +1,104 @@
diff --git a/src/core/mqtt.c b/src/core/mqtt.c
index 70a10c2..e8a39cf 100644
--- a/src/core/mqtt.c
+++ b/src/core/mqtt.c
@@ -63,6 +63,8 @@
#include <mosquitto.h>
+#include <curl/curl.h>
+
// Defines for MQTT Property Values
#define PUBLISH 0x30
#define CONTENT_TYPE 3
@@ -2180,6 +2182,75 @@ exit:
}
}
+static int _check_host_rechability(CURL *handle, curl_infotype type, char *data, size_t size, void *userp)
+{
+ bool *palive = (bool *)userp;
+
+ USP_ASSERT(palive != NULL);
+ switch(type) {
+ case CURLINFO_HEADER_OUT:
+ case CURLINFO_HEADER_IN:
+ *palive = true;
+ break;
+ case CURLINFO_TEXT:
+ {
+ USP_LOG_Debug("CURL DATA:: [%s]", data);
+ if (strstr(data, "Connected to ") != NULL) {
+ *palive = true;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int check_mqtt_host_reachability(mqtt_client_t *client)
+{
+ CURL *curl;
+ mqtt_conn_params_t *cparam = &client->conn_params;
+ char buffer[128] = {0};
+ int ret = USP_ERR_INTERNAL_ERROR;
+ bool is_alive = false;
+
+ curl = curl_easy_init();
+ if(curl) {
+ USP_SNPRINTF(buffer, 128, "mqtt://%s:%d", cparam->host, cparam->port);
+ curl_easy_setopt(curl, CURLOPT_URL, buffer);
+
+ if (strlen(cparam->username) > 0) {
+ curl_easy_setopt(curl, CURLOPT_USERNAME, cparam->username);
+ }
+
+ if (strlen(cparam->password) > 0) {
+ curl_easy_setopt(curl, CURLOPT_PASSWORD, cparam->password);
+ }
+
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
+ curl_easy_setopt(curl, CURLOPT_DEBUGDATA, &is_alive);
+ curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, _check_host_rechability);
+
+ /* complete within 2 seconds */
+ curl_easy_setopt(curl, CURLOPT_TIMEOUT, 2L);
+
+ ret = curl_easy_perform(curl);
+ /* Check for errors */
+ if(ret == CURLE_OK || ret == CURLE_URL_MALFORMAT || is_alive == true) {
+ USP_LOG_Debug("CURL MQTT host %s, ret %d, alive %d ...", buffer, ret, is_alive);
+ ret = USP_ERR_OK;
+ } else {
+ USP_LOG_Info("# CURL MQTT host %s unreachable: %d=>%s ...", buffer, ret, curl_easy_strerror(ret));
+ }
+
+ /* always cleanup */
+ curl_easy_cleanup(curl);
+ }
+
+ return ret;
+}
+
/*********************************************************************//**
**
** PerformMqttClientConnect
@@ -2249,6 +2320,14 @@ int PerformMqttClientConnect(mqtt_client_t *client)
keep_alive = 5;
}
+ // Below function is a workaround to check the host reachability with a timeout
+ // mosquitto_connect_* API block the thread for 2 mins if host is not reachable,
+ // which halts other clients connectivity
+ err = check_mqtt_host_reachability(client);
+ if (err != USP_ERR_OK) {
+ err = USP_ERR_INTERNAL_ERROR;
+ goto exit;
+ }
// Release the access mutex temporarily whilst performing the connect call
// We do this to prevent the data model thread from potentially being blocked, whilst the connect call is taking place
OS_UTILS_UnlockMutex(&mqtt_access_mutex);

View File

@@ -1,14 +0,0 @@
--- a/src/core/device_uds.c
+++ b/src/core/device_uds.c
@@ -182,10 +182,7 @@ int DEVICE_UDS_Start(void)
USP_SNPRINTF(path, sizeof(path), "%s.%d", device_uds_conn_root, instance);
USP_LOG_Warning("%s: Deleting %s as it contained invalid parameters.", __FUNCTION__, path);
err = DATA_MODEL_DeleteInstance(path, 0);
- if (err != USP_ERR_OK)
- {
- goto exit;
- }
+ goto exit;
}
ucp = FindUdsParamsByInstance(instance);

View File

@@ -1,32 +0,0 @@
--- a/src/core/device_controller.c 2024-08-23 18:22:55.378560809 +0530
+++ b/src/core/device_controller.c 2024-08-23 19:09:07.130278193 +0530
@@ -4282,6 +4282,14 @@
goto exit;
}
+#if defined(E2ESESSION_EXPERIMENTAL_USP_V_1_2)
+ err = ProcessControllerE2ESessionAdded(cont);
+ if (err != USP_ERR_OK)
+ {
+ goto exit;
+ }
+#endif
+
// Exit if unable to get the object instance numbers present in this controller's MTP table
USP_SNPRINTF(path, sizeof(path), "%s.%d.MTP", device_cont_root, cont_instance);
err = DATA_MODEL_GetInstances(path, &iv);
@@ -4323,14 +4331,6 @@
DEVICE_MQTT_UpdateControllerTopics();
#endif
-#if defined(E2ESESSION_EXPERIMENTAL_USP_V_1_2)
- err = ProcessControllerE2ESessionAdded(cont);
- if (err != USP_ERR_OK)
- {
- goto exit;
- }
-#endif
-
// If the code gets here, then we successfully retrieved all data about the controller (even if some of the MTPs were not added)
err = USP_ERR_OK;

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=obuspc
PKG_VERSION:=1.0.1.8
PKG_VERSION:=1.0.1.7
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/obuspa-test-controller.git
PKG_SOURCE_VERSION:=1cf32fa4cb5c07906b1e061a394cf0413a6ad750
PKG_SOURCE_VERSION:=61f14c5a1fba8c251cf1cfb18c163eab62d5674d
PKG_MAINTAINER:=Vivek Dutta <vivek.dutta@iopsys.eu>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=packet-capture-diagnostics
PKG_VERSION:=1.0.1
PKG_VERSION:=1.0.0
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/packet-capture-diagnostics.git
PKG_SOURCE_VERSION:=6c64e11d78b3be9990714bf5fcd97752cc15c4a8
PKG_SOURCE_VERSION:=a47189b5faa9f678f1a27475c474cc1524d777f4
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -5,13 +5,13 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=periodicstats
PKG_VERSION:=1.5.12
PKG_VERSION:=1.5.11
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/periodicstats.git
PKG_SOURCE_VERSION:=e59f980306e9ef4c1e3e56839906b8c5cba18338
PKG_SOURCE_VERSION:=0bfe78ce9daf1cfbe9453e2cb08327dc7885cea2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif

View File

@@ -6,12 +6,12 @@ include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=qosmngr
PKG_VERSION:=1.0.15
PKG_VERSION:=1.0.11
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=f6b77d16d2103b7336a476e710a10f1dd28274f6
PKG_SOURCE_VERSION:=56829e15bdce24a3eb4f8dfa43355d4b25632c48
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/qosmngr.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
@@ -36,8 +36,6 @@ define Package/qosmngr/description
Configures L2 QoS and collects queue statistics
endef
TARGET_CFLAGS += -DBBF_VENDOR_PREFIX=\\\"$(CONFIG_BBF_VENDOR_PREFIX)\\\"
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ./qosmngr/* $(PKG_BUILD_DIR)/
@@ -56,7 +54,6 @@ endif
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/qosmngr $(1)/usr/sbin
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/bbf_plugin/libqos_bbf.so $(1) $(PKG_NAME)
$(BBFDM_INSTALL_MS_PLUGIN) $(PKG_BUILD_DIR)/bbf_plugin/libqos_vendor_bbf.so $(1) $(PKG_NAME)
endef
$(eval $(call BuildPackage,qosmngr))

View File

@@ -26,17 +26,7 @@ hw_intf_init() {
# Initialize the hardware setup library
hw_init_all() {
local tc=0
export TMP_HW_QUEUE_LIST=""
echo clear > /proc/ifc_debug
echo reinit > /proc/ifc_debug
for tc in $(seq 0 7); do
rm -rf "/tmp/qos/dscp_values_${tc}_4"
rm -rf "/tmp/qos/dscp_values_${tc}_6"
done
return 0
}
@@ -160,8 +150,6 @@ hw_commit_all() {
local shape_rate="$TMP_HW_SHAPE_RATE"
local q_count="0"
local mac_qos_flag=""
local pbit=0
local tc=0
# Reorder queues
for q in ${sorted_list} ; do
@@ -202,48 +190,12 @@ hw_commit_all() {
;;
esac
rm -f "/tmp/qos/wan_link_shape_rate"
rm -f "/tmp/qos/wan_link_speed"
if [ "${glob_alg}" != "" ] ; then
/userfs/bin/qosrule discpline $(hw_sc_alg2str ${glob_alg}) ${weight_list} \
uplink-bandwidth ${shape_rate:-10000000} \
queuemask "$(((1 << q_count) - 1))"
echo ${mac_qos_flag} > /proc/qdma_wan/mac_qos_flag
if [ -n "${shape_rate}" ]; then
echo "${shape_rate}" > "/tmp/qos/wan_link_shape_rate"
else
/usr/sbin/qos-uplink-bandwidth
fi
else
/userfs/bin/qosrule discpline Enable 0
fi
if [ -x /userfs/bin/blapi_cmd ]; then
echo 1 > /proc/ifc_send_to_ppe
for tc in $(seq 0 7); do
if [ -s "/tmp/qos/dscp_values_${tc}_4" ]; then
sort -un "/tmp/qos/dscp_values_${tc}_4" | awk 'NR==1{first=$1;last=$1;next}
$1 == last+1 {last=$1;next}
{system("/userfs/bin/blapi_cmd traffic set_traffic_class DSCP " first*4 " " or(last*4, 0x3) " 1");first=$1;last=first}
END{system("/userfs/bin/blapi_cmd traffic set_traffic_class DSCP " first*4 " " or(last*4, 0x3) " 1")}'
fi
if [ -s "/tmp/qos/dscp_values_${tc}_6" ]; then
[ -s "/tmp/qos/dscp_values_${tc}_4" ] && sort -un "/tmp/qos/dscp_values_${tc}_6" | awk 'NR==1{first=$1;last=$1;next}
$1 == last+1 {last=$1;next}
{system("/userfs/bin/blapi_cmd traffic set_traffic_class DSCP " first*4 " " or(last*4, 0x3) " 0");first=$1;last=first}
END{system("/userfs/bin/blapi_cmd traffic set_traffic_class DSCP " first*4 " " or(last*4, 0x3) " 0")}'
sort -un "/tmp/qos/dscp_values_${tc}_6" | awk 'NR==1{first=$1;last=$1;next}
$1 == last+1 {last=$1;next}
{system("/userfs/bin/blapi_cmd traffic set_traffic_class DSCP " first*4 " " or(last*4, 0x3) " 1");first=$1;last=first}
END{system("/userfs/bin/blapi_cmd traffic set_traffic_class DSCP " first*4 " " or(last*4, 0x3) " 1")}'
fi
done
fi
if [ -x /userfs/bin/ifc ]; then
echo 1 > /proc/ifc_send_to_ppe
for pbit in $(seq 0 7); do
/userfs/bin/ifc add vip pbit $pbit
done
fi
}

View File

@@ -26,7 +26,6 @@ broute_ipv4_rule_options()
config_get protocol "$cid" "proto"
config_get dscp_filter "$cid" "dscp_filter"
config_get icmp_type "$cid" "icmp_type"
config_get traffic_class "$cid" "traffic_class"
set_ip_addr "$cid" ebt_match_src_ip ebt_match_dst_ip
@@ -37,7 +36,6 @@ broute_ipv4_rule_options()
tos_val=$((dscp_filter<<2))
tos_hex=$(printf "%x" $tos_val)
broute_filter_on_dscp "$tos_hex"
[ -n "$traffic_class" -a "$dscp_filter" != "0" ] && echo "$((dscp_filter))" >> "/tmp/qos/dscp_values_${traffic_class}_4"
fi
if [ -n "$protocol" ]; then
@@ -59,7 +57,6 @@ broute_ipv6_rule_options()
config_get protocol "$cid" "proto"
config_get dscp_filter "$cid" "dscp_filter"
config_get icmp_type "$cid" "icmp_type"
config_get traffic_class "$cid" "traffic_class"
set_ip_addr "$cid" ebt_match_ipv6_src_ip ebt_match_ipv6_dst_ip
@@ -70,7 +67,6 @@ broute_ipv6_rule_options()
tos_val=$((dscp_filter<<2))
tos_hex=$(printf "%x" $tos_val)
ebt_match_ipv6_dscp "$tos_hex"
[ -n "$traffic_class" -a "$dscp_filter" != "0" ] && echo "$((dscp_filter))" >> "/tmp/qos/dscp_values_${traffic_class}_6"
fi
if [ -n "$protocol" ]; then

View File

@@ -2,10 +2,8 @@
readonly WANPORT="$(jsonfilter -i /etc/board.json -e @.network.wan.device)"
readonly LINKSPEED_FILE="/tmp/qos/wan_link_speed"
readonly LINKSHAPE_FILE="/tmp/qos/wan_link_shape_rate"
[ -z "${PORT}" -o "${WANPORT}" = "${PORT}" ] || exit 0
[ -f "${LINKSHAPE_FILE}" ] && exit 0
[ "${WANPORT}" = "${PORT}" ] || exit 0
LINKSPEED="$(devstatus "${WANPORT}" | jsonfilter -e '@["speed"]' | tr -d 'A-Z')"
PREV_LINKSPEED=$(cat ${LINKSPEED_FILE} 2>/dev/null)

View File

@@ -43,7 +43,6 @@ sort_classify_by_order() {
handle_classify() {
local corder_file="/tmp/qos/classify.order"
local interf=""
while read -r line; do
line_cid=${line#*_}
@@ -61,19 +60,6 @@ handle_classify() {
handle_policer_rules $line_cid
fi
done < "$corder_file"
# Handle the config for DSCP to pbit conversion:
# For a given port there can be up to 64 dscp2pbit mappings, and each mapping
# is represented by one UCI 'config classify' section, so there can be up to
# 64 such sections.
#
# For each port, traverse all 'config classify' sections and
# extract the DSCP to P-bit conversion info,
# generate the dscp2pbit mapping list,
# then apply the dscp2pbit rule.
for interf in $(jsonfilter -i /etc/board.json -e @.network.lan.ports[*] -e @.network.lan.device -e @.network.wan.device | xargs); do
handle_ebtables_dscp2pbit "$interf"
[ -n "$BR_RULE_DSCP2PBIT" ] && broute_apply_dscp2pbit_rule
done
}
# Configure classifier based on UCI subtree 'qos.classify'
@@ -121,7 +107,7 @@ setup_qos() {
touch /tmp/qos/qos
cp /etc/config/qos /tmp/qos/qos
fi
create_ebtables_chains
create_iptables_chains
}

View File

@@ -3,23 +3,12 @@
BR_RULE=""
BR6_RULE=""
BR_RULE_DSCP2PBIT=""
DSCP2PBIT_MAPPING=""
init_broute_rule() {
BR_RULE=""
BR6_RULE=""
}
init_broute_dscp2pbit_rule() {
BR_RULE_DSCP2PBIT=""
DSCP2PBIT_MAPPING=""
}
broute_filter_on_l3_if() {
BR_RULE="$BR_RULE --logical-in $1"
}
broute_filter_on_src_if() {
BR_RULE="$BR_RULE --in-if $1"
}
@@ -114,7 +103,7 @@ ebt_match_ip_icmp_type() {
ebt_match_ipv6_protocol() {
#when ethertype is not configured by user then both proto rules of ipv4
#and ipv6 to be installed so update BR6_RULE string as well otherwise
#update BR_RULE only for installation of ipv6 proto rule only.
#update BR_RULE only for installation of ipv6 proto rule only.
if [ -n "$BR6_RULE" ]; then
BR6_RULE="$BR6_RULE --ip6-proto $1"
else
@@ -150,39 +139,14 @@ broute_filter_on_vid() {
}
broute_append_rule() {
# if src_if is loopback, then add the rule to OUTPUT(qos_output) chain of nat table
if [ "$src_if" = "lo" ]; then
echo "ebtables --concurrent -t nat -A qos_output $BR_RULE" >> /tmp/qos/classify.ebtables
if [ -n "$BR6_RULE" ]; then
echo "ebtables --concurrent -t nat -A qos_output $BR6_RULE" >> /tmp/qos/classify.ebtables
fi
return
fi
local broute_chain="$1"
#when ethertype is not configured by user then both proto rules of ipv4
#and ipv6 to be installed otherwise install ipv6 proto rule only.
echo "ebtables --concurrent -t broute -A $broute_chain $BR_RULE" >> /tmp/qos/classify.ebtables
echo "ebtables --concurrent -t broute -A qos $BR_RULE" >> /tmp/qos/classify.ebtables
if [ -n "$BR6_RULE" ]; then
echo "ebtables --concurrent -t broute -A $broute_chain $BR6_RULE" >> /tmp/qos/classify.ebtables
echo "ebtables --concurrent -t broute -A qos $BR6_RULE" >> /tmp/qos/classify.ebtables
fi
}
broute_apply_dscp2pbit_rule() {
# Write dscp2pbit broute rule to classify.ebtables file
echo "ebtables --concurrent -t broute -A dscp2pbits -p 0x8100 $BR_RULE_DSCP2PBIT" >> /tmp/qos/classify.ebtables
}
broute_rule_set_xlate_vid_pbit() {
local vid_mark="$1"
local pcp_mark="$2"
BR_RULE="$BR_RULE -j vlantranslation"
[ -n "$vid_mark" ] && BR_RULE="$BR_RULE --vlanxlate-vid-set $vid_mark"
[ -n "$pcp_mark" ] && BR_RULE="$BR_RULE --vlanxlate-prio-set $pcp_mark"
BR_RULE="$BR_RULE --vlanxlate-target CONTINUE"
}
set_ip_addr()
{
local cid="$1"
@@ -270,18 +234,12 @@ handle_ebtables_rules() {
local protocol=""
local ip_version=""
config_get pcp_mark "$sid" "pcp_mark"
config_get dscp_filter "$sid" "dscp_filter"
# return if it is a classify section for DSCP to p-bit mapping
if [ -n "$pcp_mark" ] && [ -n "$dscp_filter" ]; then
return
fi
init_broute_rule
config_get src_if "$sid" "ifname"
config_get src_mac "$sid" "src_mac"
config_get dst_mac "$sid" "dst_mac"
config_get dscp_filter "$sid" "dscp_filter"
config_get pcp_check "$sid" "pcp_check"
config_get eth_type "$sid" "ethertype"
config_get vid "$sid" "vid_check"
@@ -295,13 +253,6 @@ handle_ebtables_rules() {
config_get traffic_class "$sid" "traffic_class"
config_get protocol "$sid" "proto"
config_get all_interfaces "$sid" "all_interfaces"
config_get l3_ifname "$sid" "l3_ifname"
config_get vid_mark "$sid" "vid_mark"
if [ -n "$l3_ifname" ]; then
broute_filter_on_l3_if "$l3_ifname"
is_l2_rule=1
fi
if [ "$all_interfaces" == "1" ]; then
is_l2_rule=1
@@ -452,83 +403,11 @@ handle_ebtables_rules() {
[ -n "$traffic_class" ] && broute_rule_set_traffic_class "$traffic_class"
if [ -n "$vid_mark" ] || [ -n "$pcp_mark" ]; then
broute_rule_set_xlate_vid_pbit "$vid_mark" "$pcp_mark"
fi
if [ -n "$BR_RULE" ]; then
if [ -n "$vid_mark" ] || [ -n "$pcp_mark" ]; then
broute_append_rule "prevlanxlate" "$src_if"
else
broute_append_rule "qos" "$src_if"
fi
fi
}
handle_ebtables_dscp2pbit() {
local in_if=$1
local dscp_filter=""
local pcp_mark=""
local ifname=""
local dscp2pbit_mapping_list=""
local corder_file="/tmp/qos/classify.order"
if [ -z "$in_if" ]; then
return
fi
init_broute_dscp2pbit_rule
while read -r line; do
line_cid=${line#*_}
config_get dscp_filter "$line_cid" "dscp_filter"
config_get pcp_mark "$line_cid" "pcp_mark"
# skip this entry if it is not a dscp to p-bit rule
if [ -z "$dscp_filter" ] || [ -z "$pcp_mark" ]; then
continue
fi
config_get ifname "$line_cid" "ifname"
# skip this entry if the config is not for the interface currently being processed (in_if)
if [ -n "$ifname" ] && [ "$ifname" != "$in_if" ]; then
continue
fi
dscp2pbit_mapping_list="$dscp2pbit_mapping_list,$dscp_filter=$pcp_mark"
done < "$corder_file"
# if no dscp2pbit config was found for this interface, return
[ -z "$dscp2pbit_mapping_list" ] && return
# strip the leading comma from dscp2pbit_mapping_list; it is not needed
dscp2pbit_mapping_list="${dscp2pbit_mapping_list:1}"
# construct ebtables rule:
BR_RULE_DSCP2PBIT=" -i $in_if -j dscp2pbit --dscp2pbit-mapping $dscp2pbit_mapping_list --dscp2pbit-target CONTINUE"
[ -n "$BR_RULE" ] && broute_append_rule
}
create_ebtables_chains() {
ebtables --concurrent -t nat -N qos_output -P RETURN 2> /dev/null
ret=$?
if [ $ret -eq 0 ]; then
ebtables --concurrent -t nat -A OUTPUT -j qos_output
else
ebtables --concurrent -t nat -D OUTPUT -j qos_output
ebtables --concurrent -t nat -A OUTPUT -j qos_output
fi
ebtables --concurrent -t broute -N dscp2pbits -P RETURN 2> /dev/null
ret=$?
if [ $ret -eq 0 ]; then
ebtables --concurrent -t broute -A BROUTING -j dscp2pbits
else
ebtables --concurrent -t broute -D BROUTING -j dscp2pbits
ebtables --concurrent -t broute -A BROUTING -j dscp2pbits
fi
ebtables --concurrent -t broute -N qos -P RETURN 2> /dev/null
ebtables --concurrent -t broute -N qos 2> /dev/null
ret=$?
if [ $ret -eq 0 ]; then
ebtables --concurrent -t broute -A BROUTING -j qos
@@ -536,22 +415,9 @@ create_ebtables_chains() {
ebtables --concurrent -t broute -D BROUTING -j qos
ebtables --concurrent -t broute -A BROUTING -j qos
fi
ebtables --concurrent -t broute -N prevlanxlate -P RETURN 2> /dev/null
ret=$?
if [ $ret -eq 0 ]; then
ebtables --concurrent -t broute -I BROUTING -j prevlanxlate
else
ebtables --concurrent -t broute -D BROUTING -j prevlanxlate
ebtables --concurrent -t broute -I BROUTING -j prevlanxlate
fi
}
flush_ebtables_chains() {
echo "ebtables --concurrent -t nat -F qos_output" > /tmp/qos/classify.ebtables
echo "ebtables --concurrent -t broute -F qos" > /tmp/qos/classify.ebtables
echo "ebtables --concurrent -t broute -F dscp2pbits" >> /tmp/qos/classify.ebtables
echo "ebtables --concurrent -t broute -F prevlanxlate" >> /tmp/qos/classify.ebtables
echo "ebtables -t broute -F qos" > /tmp/qos/classify.ebtables
}

Some files were not shown because too many files have changed in this diff.