Compare commits


1 Commit

Author: Janusz Dziedzic
SHA1: 2e82c5e75a
Message: map-topology: add ieee1905 dependency
Signed-off-by: Janusz Dziedzic <janusz.dziedzic@iopsys.eu>
Date: 2022-05-13 09:15:45 +00:00
642 changed files with 13646 additions and 380620 deletions

ated/Makefile (new file)

@@ -0,0 +1,78 @@
#
# Copyright (C) 2019 iopsys Software Solutions AB. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=ated
PKG_VERSION:=1.2.2
PKG_RELEASE:=1
PKG_SOURCE_VERSION:=f614cba983d827d5185c60a6a5a35530621d44d2
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/mediatek/ated.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_RELEASE)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_LICENSE:=GPLv2
PKG_LICENSE_FILES:=LICENSE
# support parallel build
#PKG_BUILD_PARALLEL:=1
# Re-create configure scripts if not present.
#PKG_FIXUP:=autoreconf
# Run the install target when cross compiling, i.e. make install DESTDIR=$(PKG_INSTALL_DIR),
# so we don't need to pick out the resulting files from the build dir.
PKG_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
define Package/ated
CATEGORY:=Utilities
TITLE:=Daemon for handling wifi calibration
URL:=
DEPENDS:= libc
endef
define Package/ated/description
Daemon for handling wifi calibration
endef
#TARGET_CFLAGS += -I$(LINUX_DIR)/include -I$(LINUX_DIR)/arch/mips/include
MAKE_FLAGS += \
v=2 \
m=3
#TARGET_CPPFLAGS := \
# -I$(STAGING_DIR)/usr/include/bcm963xx/shared/opensource/include/bcm963xx \
# -I$(STAGING_DIR)/usr/include/bcm963xx/bcmdrivers/opensource/include/bcm963xx \
# $(TARGET_CPPFLAGS)
# We do not want to run the default install step.
define Build/Install/Default
endef
define Package/ated/install
$(INSTALL_DIR) $(1)/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/ated $(1)/sbin/
endef
$(eval $(call BuildPackage,ated))

bbf/Config_bbfdm.in (new file)

@@ -0,0 +1,27 @@
config BBF_VENDOR_EXTENSION
bool "Enable Vendor Extension"
default y
config BBF_VENDOR_LIST
string "Vendor List"
default "iopsys"
config BBF_VENDOR_PREFIX
string "Vendor Prefix"
default "X_IOPSYS_EU_"
config BBF_TR104
bool "Enable TR-104 Data Model Support"
default y
config BBF_TR143
bool "Enable TR-143 Data Model Support"
default y
config BBFDM_ENABLE_JSON_PLUGIN
bool "Enable json plugin to extend datamodel"
default y
config BBFDM_ENABLE_DOTSO_PLUGIN
bool "Enable shared library plugin to extend datamodel"
default y

bbf/Makefile (new file)

@@ -0,0 +1,185 @@
#
# Copyright (C) 2022 IOPSYS
#
include $(TOPDIR)/rules.mk
PKG_NAME:=libbbfdm
PKG_VERSION:=6.6.35
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/bbf.git
PKG_SOURCE_VERSION:=0159ecf5d84805d76dff8cb80831334e200989c7
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
PKG_RELEASE=$(PKG_SOURCE_VERSION)
PKG_LICENSE:=LGPL-2.1
PKG_LICENSE_FILES:=LICENSE
PKG_FIXUP:=autoreconf
include $(INCLUDE_DIR)/package.mk
define Package/libbbf_api
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=Library for libbbfdm API
endef
define Package/libbbfdm/default
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=Library for broadband forum data model support
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libtrace +libcurl
endef
define Package/libbbfdm
$(Package/libbbfdm/default)
TITLE += (internal)
VARIANT:=internal
endef
define Package/libbbfdm-mbedtls
$(Package/libbbfdm/default)
TITLE += (mbedtls)
DEPENDS += +PACKAGE_libbbfdm-mbedtls:libmbedtls
VARIANT:=mbedtls
DEFAULT_VARIANT:=1
endef
define Package/libbbfdm-openssl
$(Package/libbbfdm/default)
TITLE += (openssl)
DEPENDS += +PACKAGE_libbbfdm-openssl:libopenssl
CONFLICTS := libbbfdm-mbedtls
VARIANT:=openssl
endef
define Package/libbbfdm-wolfssl
$(Package/libbbfdm/default)
TITLE += (wolfssl)
DEPENDS += +PACKAGE_libbbfdm-wolfssl:libwolfssl
CONFLICTS := libbbfdm-mbedtls libbbfdm-openssl
VARIANT:=wolfssl
endef
define Package/libbbfdm/config
source "$(SOURCE)/Config_bbfdm.in"
endef
define Package/libbbf_api/description
Library containing the API (UCI, UBUS, JSON, CLI and Browse) of libbbfdm
endef
define Package/libbbfdm/description
Library contains the data model tree. It includes TR181, TR104 and TR143 data models
endef
USE_LOCAL=$(shell ls ./src/ 2>/dev/null >/dev/null && echo 1)
ifneq ($(USE_LOCAL),)
define Build/Prepare
$(CP) ./src/* $(PKG_BUILD_DIR)/
endef
endif
TARGET_CFLAGS += \
-D_GNU_SOURCE \
-Wall -Werror
CONFIGURE_ARGS += \
--enable-tr181
ifeq ($(CONFIG_BBF_TR104),y)
CONFIGURE_ARGS += \
--enable-tr104
endif
ifeq ($(CONFIG_BBF_TR143),y)
CONFIGURE_ARGS += \
--enable-tr143
endif
ifeq ($(CONFIG_BBF_VENDOR_EXTENSION),y)
CONFIGURE_ARGS += \
--enable-vendor-extension
CONFIGURE_ARGS += \
BBF_VENDOR_LIST="$(CONFIG_BBF_VENDOR_LIST)" \
BBF_VENDOR_PREFIX="$(CONFIG_BBF_VENDOR_PREFIX)"
endif ##CONFIG_BBF_VENDOR_EXTENSION
ifeq ($(CONFIG_BBFDM_ENABLE_JSON_PLUGIN),y)
CONFIGURE_ARGS += \
--enable-json-plugin
endif
ifeq ($(CONFIG_BBFDM_ENABLE_DOTSO_PLUGIN),y)
CONFIGURE_ARGS += \
--enable-shared-library
endif
ifeq ($(BUILD_VARIANT),openssl)
CONFIGURE_ARGS += --enable-libopenssl
TARGET_CFLAGS += -I$(STAGING_DIR)/usr/include/
endif
ifeq ($(BUILD_VARIANT),wolfssl)
CONFIGURE_ARGS += --enable-libwolfssl
TARGET_CFLAGS += -I$(STAGING_DIR)/usr/include/wolfssl
endif
ifeq ($(BUILD_VARIANT),mbedtls)
CONFIGURE_ARGS += --enable-libmbedtls
TARGET_CFLAGS += -I$(STAGING_DIR)/usr/include/
endif
define Package/libbbf_api/install
$(INSTALL_DIR) $(1)/lib
endef
define Package/libbbfdm/default/install
$(INSTALL_DIR) $(1)/lib
$(INSTALL_DIR) $(1)/usr/share/bbfdm
$(INSTALL_DIR) $(1)/etc/bbfdm
$(INSTALL_DIR) $(1)/etc/bbfdm/dmmap
$(INSTALL_DIR) $(1)/etc/bbfdm/json
$(INSTALL_DIR) $(1)/usr/lib/bbfdm
$(CP) $(PKG_BUILD_DIR)/bin/.libs/libbbfdm.so* $(1)/lib/
$(INSTALL_BIN) $(PKG_BUILD_DIR)/scripts/* $(1)/usr/share/bbfdm
endef
define Package/libbbfdm/default/prerm
#!/bin/sh
rm -rf /etc/bbfdm/dmmap/*
exit 0
endef
Package/libbbfdm-openssl/prerm = $(Package/libbbfdm/default/prerm)
Package/libbbfdm-wolfssl/prerm = $(Package/libbbfdm/default/prerm)
Package/libbbfdm-mbedtls/prerm = $(Package/libbbfdm/default/prerm)
Package/libbbfdm-openssl/install = $(Package/libbbfdm/default/install)
Package/libbbfdm-wolfssl/install = $(Package/libbbfdm/default/install)
Package/libbbfdm-mbedtls/install = $(Package/libbbfdm/default/install)
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/usr/include
$(INSTALL_DIR) $(1)/usr/include/libbbfdm
$(INSTALL_DIR) $(1)/usr/include/libbbf_api
$(INSTALL_DATA) $(PKG_BUILD_DIR)/include/*.h $(1)/usr/include/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/*.h $(1)/usr/include/libbbfdm/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbf_api/*.h $(1)/usr/include/libbbf_api/
$(CP) $(PKG_BUILD_DIR)/bin/.libs/libbbfdm.{a,so*} $(1)/usr/lib/
touch $(1)/usr/lib/libbbf_api.so
endef
$(eval $(call BuildPackage,libbbf_api))
$(eval $(call BuildPackage,libbbfdm))
$(eval $(call BuildPackage,libbbfdm-openssl))
$(eval $(call BuildPackage,libbbfdm-wolfssl))
$(eval $(call BuildPackage,libbbfdm-mbedtls))


@@ -1,10 +0,0 @@
if PACKAGE_bbfdmd
config BBF_VENDOR_PREFIX
string "Vendor Prefix"
default "X_IOWRT_EU_"
config BBF_OBFUSCATION_KEY
string "Obfuscation key"
default "371d530c95a17d1ca223a29b7a6cdc97e1135c1e0959b51106cca91a0b148b5e42742d372a359760742803f2a44bd88fca67ccdcfaeed26d02ce3b6049cb1e04"
endif


@@ -1,217 +0,0 @@
#
# Copyright (C) 2023 IOPSYS
#
include $(TOPDIR)/rules.mk
PKG_NAME:=bbfdm
PKG_VERSION:=1.16.6
USE_LOCAL:=0
ifneq ($(USE_LOCAL),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/bbfdm.git
PKG_SOURCE_VERSION:=1615b42e405faceceac825f9c0387a58b90785ae
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
RSTRIP:=true
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
include bbfdm.mk
define Package/libbbfdm-api
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=BBF datamodel library, provides API to extend datamodel using DotSO plugins
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libcurl
ABI_VERSION:=1.0
endef
define Package/libbbfdm-ubus
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=BBF datamodel ubus library, provides API to expose datamodel over ubus
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api
endef
define Package/bbfdmd
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=Datamodel ubus backend to expose core tree
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api +libopenssl +libbbfdm-ubus +bbf_configmngr
endef
define Package/dm-service
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=Datamodel ubus backend to expose micro-service tree
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c +libbbfdm-api +libbbfdm-ubus +bbf_configmngr
endef
define Package/bbf_configmngr
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:= BBF Config Manager
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json
endef
define Package/bbfdmd/config
source "$(SOURCE)/Config_bbfdmd.in"
endef
# Below config is a hack to force-recompile dependent micro-services
define Package/libbbfdm-api/config
if PACKAGE_bbfdmd
config BBF_LIBBBFDM_VERSION
string "Internal config variable to force recompile"
default "v${PKG_VERSION}"
endif
endef
define Package/libbbfdm-api/description
Library containing the API (UCI, UBUS, JSON, CLI and Browse) of libbbfdm
endef
define Package/libbbfdm-ubus/description
Library contains the APIs to expose data model over ubus
endef
define Package/bbfdmd/description
Daemon to expose Datamodel core tree
endef
define Package/dm-service/description
Daemon to expose Datamodel micro-service tree
endef
define Package/bbf_configmngr/description
Daemon for handling bbf reload services via ubus bbf.config
endef
ifeq ($(USE_LOCAL),1)
define Build/Prepare
$(CP) ~/git/bbfdm/* $(PKG_BUILD_DIR)/
endef
endif
CMAKE_OPTIONS += \
-DBBF_VENDOR_PREFIX:String="$(CONFIG_BBF_VENDOR_PREFIX)" \
-DBBFDMD_MAX_MSG_LEN:Integer=10485760 \
-DCMAKE_BUILD_TYPE:String="Debug"
define Package/libbbfdm-api/install
$(INSTALL_DIR) $(1)/lib
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/legacy/libbbfdm-api.so $(1)/lib/
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/version-2/libbbfdm-api-v2.so $(1)/lib/
$(INSTALL_DIR) $(1)/usr/libexec/rpcd
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/libexec/rpcd/bbf.secure $(1)/usr/libexec/rpcd/bbf.secure
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/libexec/rpcd/bbf.diag $(1)/usr/libexec/rpcd/bbf.diag
$(INSTALL_DIR) $(1)/usr/share/bbfdm/scripts/
$(CP) $(PKG_BUILD_DIR)/utilities/files/usr/share/bbfdm/scripts/bbf_api $(1)/usr/share/bbfdm/scripts/
$(INSTALL_DIR) $(1)/etc/bbfdm/certificates
echo "$(CONFIG_BBF_OBFUSCATION_KEY)" > $(1)/etc/bbfdm/.secure_hash
endef
define Package/libbbfdm-ubus/install
$(INSTALL_DIR) $(1)/lib
$(CP) $(PKG_BUILD_DIR)/libbbfdm-ubus/libbbfdm-ubus.so $(1)/lib/
endef
define Package/bbfdmd/install
$(INSTALL_DIR) $(1)/etc/bbfdm/dmmap
$(INSTALL_DIR) $(1)/usr/share/bbfdm
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_CONF) ./files/etc/config/bbfdm $(1)/etc/config/bbfdm
$(INSTALL_CONF) ./files/etc/config/schedules $(1)/etc/config/schedules
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/bbfdmd/ubus/bbfdmd $(1)/usr/sbin/
$(STRIP) $(1)/usr/sbin/bbfdmd
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/etc/init.d/bbfdmd $(1)/etc/init.d/bbfdmd
$(INSTALL_DIR) $(1)/etc/hotplug.d/iface
$(INSTALL_BIN) ./files/etc/hotplug.d/iface/85-bbfdm-sysctl $(1)/etc/hotplug.d/iface/85-bbfdm-sysctl
$(INSTALL_DIR) $(1)/lib/upgrade/keep.d
$(INSTALL_DATA) ./files/lib/upgrade/keep.d/bbf $(1)/lib/upgrade/keep.d/bbf
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_BIN) ./files/etc/uci-defaults/91-fix-bbfdmd-enabled-option $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/uci-defaults/ruleng.bbfdm $(1)/etc/uci-defaults
$(INSTALL_DIR) $(1)/etc/ruleng
$(INSTALL_BIN) ./files/etc/ruleng/bbfdm.json $(1)/etc/ruleng
endef
define Package/dm-service/install
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/etc/init.d/bbfdm.services $(1)/etc/init.d/
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/dm-service/dm-service $(1)/usr/sbin/
$(BBFDM_REGISTER_SERVICES) -v ${CONFIG_BBF_VENDOR_PREFIX} ./bbfdm_service.json $(1) core
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/libbbfdm/libcore.so $(1) core
endef
define Package/bbf_configmngr/install
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/etc/bbfdm/
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_DIR) $(1)/usr/share/bbfdm/scripts
$(INSTALL_BIN) $(PKG_BUILD_DIR)/utilities/bbf_configd $(1)/usr/sbin/
$(STRIP) $(1)/usr/sbin/bbf_configd
$(INSTALL_BIN) ./files/etc/init.d/bbf_configd $(1)/etc/init.d/bbf_configd
$(INSTALL_BIN) $(PKG_BUILD_DIR)/utilities/files/usr/share/bbfdm/scripts/bbf_config_notify.sh $(1)/usr/share/bbfdm/scripts/
$(INSTALL_DATA) ./files/etc/bbfdm/critical_services.json $(1)/etc/bbfdm/
endef
define Package/bbfdmd/prerm
#!/bin/sh
rm -rf /etc/bbfdm/dmmap/*
exit 0
endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/usr/include
$(INSTALL_DIR) $(1)/usr/include/libbbfdm-api
$(INSTALL_DIR) $(1)/usr/include/libbbfdm-api/legacy
$(INSTALL_DIR) $(1)/usr/include/libbbfdm-api/version-2
$(INSTALL_DIR) $(1)/usr/include/libbbfdm-ubus
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-api/legacy/*.h $(1)/usr/include/libbbfdm-api/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-api/legacy/*.h $(1)/usr/include/libbbfdm-api/legacy/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-api/version-2/*.h $(1)/usr/include/libbbfdm-api/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-api/version-2/*.h $(1)/usr/include/libbbfdm-api/version-2/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-ubus/bbfdm-ubus.h $(1)/usr/include/libbbfdm-ubus/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/libbbfdm-api/legacy/include/*.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/legacy/libbbfdm-api.so $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/libbbfdm-api/version-2/libbbfdm-api-v2.so $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/libbbfdm-ubus/libbbfdm-ubus.so $(1)/usr/lib
endef
$(eval $(call BuildPackage,bbf_configmngr))
$(eval $(call BuildPackage,libbbfdm-api))
$(eval $(call BuildPackage,libbbfdm-ubus))
$(eval $(call BuildPackage,bbfdmd))
$(eval $(call BuildPackage,dm-service))


@@ -1,38 +0,0 @@
# BBFDM configuration options and utilities
bbfdm provides a few compile-time configuration options and a compile-time helper utility called [bbfdm.mk](./bbfdm.mk). This document explains the available options, their usage and best practices.
## Compilation options
| Configuration option | Description | Default Value |
| ----------------------- | ------------- | ----------- |
| CONFIG_BBF_VENDOR_LIST | List of vendor extension directories | iopsys |
| CONFIG_BBF_VENDOR_PREFIX | Prefix for Vendor extension datamodel objects/parameters | X_IOPSYS_EU_ |
| CONFIG_BBF_MAX_OBJECT_INSTANCES | Maximum number of instances per object | 255 |
| BBF_OBFUSCATION_KEY | Hash used to encode/decode in `bbf.secure` object | 371d530c95a17d1ca223a29b7a6cdc97e1135c1e0959b51106cca91a0b148b5e42742d372a359760742803f2a44bd88fca67ccdcfaeed26d02ce3b6049cb1e04 |
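For illustration only, when these options are selected through menuconfig they end up as symbols in the build's `.config`; a hypothetical excerpt using the default values shown above:
```
CONFIG_BBF_VENDOR_EXTENSION=y
CONFIG_BBF_VENDOR_LIST="iopsys"
CONFIG_BBF_VENDOR_PREFIX="X_IOPSYS_EU_"
```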
#### BBF_OBFUSCATION_KEY
`bbfdm` provides a ubus object called `bbf.secure` that allows values to be encoded and decoded. `bbf.secure` currently supports the following methods internally for encoding/decoding:
- Encode/Decode using a predefined SHA512 Hash key
- Encode/Decode using a private/public RSA key pair
The `BBF_OBFUSCATION_KEY` compile-time configuration option defines the SHA512 hash; if this option is undefined, the default value listed in the table above is used.
Users must override this parameter with their own hash value. To generate a hash, run the command below and copy the resulting value into this option.
For example, to use 'Sup3rS3cur3Passw0rd' as the passkey, get its SHA512 sum with:
```bash
$ echo -n "Sup3rS3cur3Passw0rd" | sha512sum
371d530c95a17d1ca223a29b7a6cdc97e1135c1e0959b51106cca91a0b148b5e42742d372a359760742803f2a44bd88fca67ccdcfaeed26d02ce3b6049cb1e04 -
```
> Note: Additionally, users can install an RSA private key at '/etc/bbfdm/certificates/private_key.pem'. If a private key is present, `bbf.secure` uses the RSA key for the encrypt/decrypt functions; if no key is present at the predefined path, the hash is used instead.
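As a minimal sketch of provisioning such a key with OpenSSL (the 2048-bit key size and the chmod step are assumptions, not requirements from this document):
```bash
# Generate a 2048-bit RSA private key at the path bbf.secure looks for
openssl genrsa -out /etc/bbfdm/certificates/private_key.pem 2048
# Keep the private key readable by root only
chmod 600 /etc/bbfdm/certificates/private_key.pem
```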
## Helper utility (bbfdm.mk)
bbfdm provides a helper utility, [bbfdm.mk](./bbfdm.mk), to install data model plugins either in the bbfdm core or in a micro-service directory.
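The macros defined in bbfdm.mk are thin wrappers around tools/bbfdm.sh (its content appears later in this diff). As a rough sketch, installing the data model of a hypothetical "example" micro-service via BBFDM_INSTALL_MS_DM boils down to a call such as:
```bash
# Equivalent direct invocation of the helper behind $(BBFDM_INSTALL_MS_DM):
# -m = micro-service mode; positional args are <plugin> <install dir> <service name>.
# The plugin name, install directory variable and service name here are hypothetical.
./tools/bbfdm.sh -m ./libexample.so "$PKG_INSTALL_DIR" example
```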


@@ -1,92 +0,0 @@
#
# Copyright (C) 2023 IOPSYS
#
BBFDM_BASE_DM_PATH=/usr/share/bbfdm
BBFDM_INPUT_PATH=/etc/bbfdm/micro_services
BBFDM_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
PKG_CONFIG_DEPENDS += CONFIG_BBF_LIBBBFDM_VERSION
#BBFDM_VERSION:=$(shell grep -oP '(?<=^PKG_VERSION:=).*' ${BBFDM_DIR}/Makefile)
#BBFDM_TOOLS:=$(BUILD_DIR)/bbfdm-$(BBFDM_VERSION)/tools
# Utility to install a plugin in the bbfdm core path with a priority.
# It is now possible to overwrite/remove core data model entries with a plugin, so if some
# data model objects/parameters are present in more than one plugin, the order in
# which they are loaded into memory becomes crucial. This utility helps to configure
# the priority order in which they get loaded into memory.
#
# ARGS:
# $1 => Plugin artifact
# $2 => package install directory
# $3 => Priority of the installed plugin (Optional)
#
# Note:
# - Last loaded plugin gets the highest priority
#
# Example:
# BBFDM_INSTALL_CORE_PLUGIN ./files/etc/bbfdm/json/CWMPManagementServer.json $(1)
#
# Example to install plugin with priority:
# BBFDM_INSTALL_CORE_PLUGIN ./files/etc/bbfdm/json/CWMPManagementServer.json $(1) 01
#
BBFDM_INSTALL_CORE_PLUGIN:=$(BBFDM_DIR)/tools/bbfdm.sh -p
# Utility to install the micro-service data model
# Use Case:
# To run a data model micro-service, the DotSO/JSON plugin must be installed
# into a bbf shared directory. This utility installs the DotSO/JSON plugin in
# the bbfdm shared directory and auto-generates the input file for the
# micro-service.
#
# ARGS:
# $1 => DotSo or Json plugin with complete path
# $2 => package install directory
# $3 => service name
#
# Note:
# - There can be only one main plugin file, so it is bound to PKG_NAME
# - Micro-service input.json will be auto generated with this call
#
# Example:
# BBFDM_INSTALL_MS_DM $(PKG_BUILD_DIR)/libcwmp.so $(1) $(PKG_NAME)
#
BBFDM_INSTALL_MS_DM:=$(BBFDM_DIR)/tools/bbfdm.sh -m
# Utility to install additional plugins in a data model micro-service
#
# ARGS:
# $1 => DotSo or Json plugin with complete path
# $2 => package install directory
# $3 => service name
#
# Note:
# - Use the service_name/PKG_NAME of the service in which this has to run
#
# Example:
# BBFDM_INSTALL_MS_PLUGIN $(PKG_BUILD_DIR)/libxmpp.so $(1) icwmp
#
BBFDM_INSTALL_MS_PLUGIN:=$(BBFDM_DIR)/tools/bbfdm.sh -m -p
# Utility to install helper scripts in the default bbfdm script path
#
# Use Case:
# User wants to install a script for running diagnostics
#
# ARGS:
# $1 => Script with complete path
# $2 => package install directory
#
# Note:
# - Use with -d option to install script in bbf.diag directory
#
# Example:
# BBFDM_INSTALL_SCRIPT $(PKG_BUILD_DIR)/download $(1)
# BBFDM_INSTALL_SCRIPT -d $(PKG_BUILD_DIR)/ipping $(1)
#
BBFDM_INSTALL_SCRIPT:=$(BBFDM_DIR)/tools/bbfdm.sh -s
BBFDM_REGISTER_SERVICES:=$(BBFDM_DIR)/tools/bbfdm.sh -t


@@ -1,54 +0,0 @@
{
"daemon": {
"enable": "1",
"service_name": "core",
"unified_daemon": false,
"services": [
{
"parent_dm": "Device.",
"object": "LANConfigSecurity"
},
{
"parent_dm": "Device.",
"object": "Schedules"
},
{
"parent_dm": "Device.",
"object": "Security",
"proto": "cwmp"
},
{
"parent_dm": "Device.",
"object": "PacketCaptureDiagnostics"
},
{
"parent_dm": "Device.",
"object": "SelfTestDiagnostics"
},
{
"parent_dm": "Device.",
"object": "Syslog"
},
{
"parent_dm": "Device.",
"object": "{BBF_VENDOR_PREFIX}OpenVPN",
"proto": "usp"
},
{
"parent_dm": "Device.",
"object": "RootDataModelVersion"
},
{
"parent_dm": "Device.",
"object": "Reboot()"
},
{
"parent_dm": "Device.",
"object": "FactoryReset()"
}
],
"config": {
"loglevel": "3"
}
}
}


@@ -1,23 +0,0 @@
{
"usp": [
"firewall",
"network",
"dhcp",
"time",
"wireless",
"ieee1905",
"mapcontroller",
"mosquitto",
"nginx",
"netmode"
],
"cwmp": [
"firewall",
"network",
"dhcp",
"mapcontroller",
"wireless",
"time",
"netmode"
]
}


@@ -1,12 +0,0 @@
config bbfdmd 'bbfdmd'
option enable '1'
option debug '0'
option loglevel '3'
config micro_services 'micro_services'
option enable '1'
option enable_core '0'
config reload_handler 'reload_handler'
option log_level '1'


@@ -1,2 +0,0 @@
config global 'global'
option enable '1'


@@ -1,17 +0,0 @@
#!/bin/sh
[ "$ACTION" = "ifup" ] || exit 0
apply_sysctl_configuration() {
local sysctl_conf
sysctl_conf="/etc/bbfdm/sysctl.conf"
[ -f "${sysctl_conf}" ] || touch "${sysctl_conf}"
sysctl -e -p "${sysctl_conf}" >&-
}
ubus -t 10 wait_for network.device
apply_sysctl_configuration


@@ -1,41 +0,0 @@
#!/bin/sh /etc/rc.common
START=80
STOP=07
USE_PROCD=1
PROG=/usr/sbin/bbf_configd
log() {
echo "${@}"|logger -t bbf.config -p info
}
create_needed_directories()
{
mkdir -p /tmp/bbfdm/.cwmp
mkdir -p /tmp/bbfdm/.usp
mkdir -p /tmp/bbfdm/.bbfdm
}
start_service()
{
local log_level
create_needed_directories
config_load bbfdm
config_get log_level "reload_handler" log_level 2
procd_open_instance "bbf_configd"
procd_set_param command ${PROG}
procd_append_param command -l "${log_level}"
procd_set_param respawn
procd_close_instance "bbf_configd"
}
service_triggers() {
for config_file in /etc/config/*; do
config_name=$(basename "$config_file")
procd_add_config_trigger "config.change" "$config_name" /usr/share/bbfdm/scripts/bbf_config_notify.sh
done
}


@@ -1,127 +0,0 @@
#!/bin/sh /etc/rc.common
START=97
STOP=05
USE_PROCD=1
PROG=/usr/sbin/dm-service
DM_AGENT_PROG=/usr/sbin/dm-agent
BBFDM_MICROSERVICE_DIR="/etc/bbfdm/services"
. /usr/share/libubox/jshn.sh
log() {
echo "${@}"|logger -t bbfdmd.services -p info
}
validate_bbfdm_micro_service_section()
{
uci_validate_section bbfdm micro_services "micro_services" \
'enable:bool:true' \
'enable_core:bool:false'
}
_add_microservice()
{
local name path loglevel
local enable enable_core unified_daemon dm_framework
local daemon_prog
# Check enable from micro-service
path="${1}"
enable_core="${2}"
name="$(basename ${path})"
name="${name//.json}"
json_load_file "${path}"
json_select daemon
json_get_var enable enable 1
if [ "${enable}" -eq "0" ]; then
log "datamodel micro-service ${name} not enabled"
return 0
fi
json_get_var unified_daemon unified_daemon 0
if [ "${unified_daemon}" -eq "1" ]; then
return 0
fi
json_get_var dm_framework dm-framework 0
if [ "${dm_framework}" -eq "1" ] || [ "${dm_framework}" = "true" ]; then
daemon_prog="${DM_AGENT_PROG}"
else
daemon_prog="${PROG}"
fi
json_select config
json_get_var loglevel loglevel 4
procd_open_instance "${name}"
procd_set_param command ${daemon_prog}
# Only add parameters for dm-service, not for dm-agent
if [ "${daemon_prog}" = "${PROG}" ]; then
procd_append_param command -m "${name}"
procd_append_param command -l "${loglevel}"
fi
if [ "${enable_core}" -eq "1" ]; then
procd_set_param limits core="unlimited"
procd_set_param stdout 1
procd_set_param stderr 1
fi
procd_set_param respawn
procd_close_instance "${name}"
}
configure_bbfdm_micro_services()
{
local enable enable_core
config_load bbfdm
validate_bbfdm_micro_service_section || {
log "Validation of micro_service section failed"
return 1;
}
[ "${enable}" -eq "0" ] && return 0
if [ -d "${BBFDM_MICROSERVICE_DIR}" ]; then
FILES="$(ls -1 ${BBFDM_MICROSERVICE_DIR}/*.json)"
for file in $FILES;
do
[ -e "$file" ] || continue
_add_microservice $file "${enable_core}"
done
fi
}
_start_single_service()
{
local service file
service="${1}"
if [ -d "${BBFDM_MICROSERVICE_DIR}" ]; then
file="$(ls -1 ${BBFDM_MICROSERVICE_DIR}/${service}.json)"
[ -e "$file" ] || return
_add_microservice $file "0"
fi
}
start_service()
{
if [ -n "${1}" ]; then
_start_single_service "${1}"
else
configure_bbfdm_micro_services
fi
}


@@ -1,56 +0,0 @@
#!/bin/sh /etc/rc.common
START=97
STOP=06
USE_PROCD=1
PROG=/usr/sbin/bbfdmd
. /usr/share/libubox/jshn.sh
log() {
echo "${@}"|logger -t bbfdmd.init -p info
}
validate_bbfdm_bbfdmd_section()
{
uci_validate_section bbfdm bbfdmd "bbfdmd" \
'enable:bool:true' \
'debug:bool:false' \
'loglevel:uinteger:4'
}
configure_bbfdmd()
{
local enable debug
local jlog jrefresh jtimeout jlevel
config_load bbfdm
validate_bbfdm_bbfdmd_section || {
log "Validation of bbfdmd section failed"
return 1;
}
[ "${enable}" -eq 0 ] && return 0
procd_set_param command ${PROG}
procd_append_param command -l "${loglevel}"
if [ "${debug}" -eq 1 ]; then
procd_set_param stdout 1
procd_set_param stderr 1
fi
}
start_service()
{
procd_open_instance "bbfdm"
configure_bbfdmd
procd_set_param respawn
procd_close_instance "bbfdm"
}
service_triggers()
{
procd_add_reload_trigger "bbfdm"
}


@@ -1,66 +0,0 @@
{
"hosts_refresh": {
"if" : [
{
"event": "host"
}
],
"then" : [
{
"object": "bbfdm.hostmngr",
"method":"refresh_references_db",
"args" : {},
"timeout": 1
}
]
},
"dhcp_refresh": {
"if_operator": "OR",
"if" : [
{
"event": "host"
},
{
"event": "wifi.dataelements.Associated"
}
],
"then" : [
{
"object": "bbfdm.dhcpmngr",
"method":"refresh_references_db",
"args" : {},
"timeout": 1
}
]
},
"ieee1905_refresh_add": {
"if" : [
{
"event": "ieee1905.neighbor.add"
}
],
"then" : [
{
"object": "bbfdm.ieee1905",
"method":"refresh_references_db",
"args" : {},
"timeout": 1
}
]
},
"ieee1905_refresh_del": {
"if" : [
{
"event": "ieee1905.neighbor.del"
}
],
"then" : [
{
"object": "bbfdm.ieee1905",
"method":"refresh_references_db",
"args" : {},
"timeout": 1
}
]
}
}


@@ -1,11 +0,0 @@
#!/bin/sh
# rename bbfdmd enabled option to enable
val="$(uci -q get bbfdm.bbfdmd.enabled)"
if [ -n "${val}" ]; then
uci -q set bbfdm.bbfdmd.enabled=""
uci -q set bbfdm.bbfdmd.enable="${val}"
fi
exit 0


@@ -1,58 +0,0 @@
#!/bin/sh
. /lib/functions.sh
bbfdm_sysctl_conf="/etc/bbfdm/sysctl.conf"
update_device_section() {
local section="${1}"
local dev_name="${2}"
local ipv6="${3}"
local name
# Get name value
config_get name "${section}" name
# Return if the name value differs from the dev_name value
[ "${name}" != "${dev_name}" ] && return
if [ "${ipv6}" = "0" ]; then
ipv6="1"
else
ipv6="0"
fi
# Add ipv6 option
uci -q set network.${section}.ipv6="${ipv6}"
}
parse_bbfdm_sysctl_conf_file() {
# Check if the file exists
[ -f "${bbfdm_sysctl_conf}" ] || return
# Create a temporary file
tmpfile=$(mktemp)
# Load network config
config_load network
# Read each line of the file
while read -r line; do
if echo "$line" | grep -Eq '^net\.ipv6\.conf\.(.+)\.disable_ipv6=([0-1])$'; then
name=$(echo "$line" | sed -n 's/^net\.ipv6\.conf\.\(.*\)\.disable_ipv6=[0-1]$/\1/p')
value=$(echo "$line" | sed -n 's/^net\.ipv6\.conf\.\(.*\)\.disable_ipv6=\([0-1]\)$/\2/p')
config_foreach update_device_section device "${name}" "${value}"
else
# If the line doesn't match, preserve it in the temporary file
echo "$line" >> "$tmpfile"
fi
done < "${bbfdm_sysctl_conf}"
# Replace the original file with the modified content
mv "$tmpfile" "${bbfdm_sysctl_conf}"
}
parse_bbfdm_sysctl_conf_file
exit 0


@@ -1,2 +0,0 @@
uci -q set ruleng.bbfdm=rule
uci -q set ruleng.bbfdm.recipe='/etc/ruleng/bbfdm.json'


@@ -1 +0,0 @@
/etc/bbfdm/sysctl.conf


@@ -1,203 +0,0 @@
#!/bin/bash
BBFDM_BASE_DM_PATH="usr/share/bbfdm"
BBFDM_INPUT_PATH="etc/bbfdm/micro_services"
INPUT_FILE="0"
MICRO_SERVICE=0
SCRIPT=0
DIAG=0
PLUGIN=0
DEST=""
VENDOR_EXTN=""
TOOLS="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
SRC=""
EXTRA_DATA=""
while getopts ":mpsdtv:" opt; do
case ${opt} in
m)
MICRO_SERVICE=1
;;
p)
PLUGIN=1
;;
s)
SCRIPT=1
;;
d)
DIAG=1
;;
t)
INPUT_FILE=1
;;
v)
VENDOR_EXTN=${OPTARG}
;;
?)
echo "Invalid option: ${OPTARG}"
exit 1
;;
esac
done
shift $((OPTIND-1))
SRC="${1}"
shift
DEST="${1}"
shift
DATA="${1}"
shift
EXTRA_DATA="${1}"
install_bin() {
if ! install -m0755 ${1} ${2}; then
echo "Failed to install bin ${1} => ${2}"
exit 1
fi
}
install_dir() {
if ! install -d -m0755 ${1}; then
echo "Failed to create directory ${1}"
exit 1
fi
}
install_data() {
if ! install -m0644 ${1} ${2}; then
echo "Failed to install ${1} => ${2}"
exit 1
fi
}
# Installing datamodel
bbfdm_install_dm()
{
local src dest minfile
src="$1"
dest="$2"
minfile=""
if [ -z ${src} ] || [ -z "${dest}" ] || [ -z "${TOOLS}" ]; then
echo "Invalid input option for install dm $@"
exit 1
fi
if [ ! -f "${src}" ]; then
echo "File $src does not exists..."
exit 1
fi
if [ "${src##*.}" = "json" ]; then
echo "Compacting BBFDM JSON file"
minfile=$(mktemp)
jq -c 'del(..|.description?)' ${src} > ${minfile}
if [ -n "${VENDOR_EXTN}" ]; then
sed -i "s/{BBF_VENDOR_PREFIX}/${VENDOR_EXTN}/g" ${minfile}
fi
src=${minfile}
if dpkg -s python3-jsonschema >/dev/null 2>&1; then
echo "Verifying bbfdm Datamodel JSON file"
if ! ${TOOLS}/validate_plugins.py ${src}; then
echo "Validation of the plugin failed ${src}"
exit 1
fi
else
echo "## Install python3-jsonschema to verify datamodel plugins"
fi
fi
install_bin ${src} ${dest}
if [ -f "${minfile}" ]; then
rm ${minfile}
fi
}
if [ -z "$SRC" ] || [ -z "${DEST}" ] ; then
echo "# BBFDM Null value in src[${SRC}], dest[${DEST}]"
exit 1
fi
if [ "${SCRIPT}" -eq "1" ]; then
if [ "${DIAG}" -eq "1" ]; then
install_dir ${DEST}/${BBFDM_BASE_DM_PATH}/scripts/bbf_diag
install_bin ${SRC} ${DEST}/${BBFDM_BASE_DM_PATH}/scripts/bbf_diag/
else
install_dir ${DEST}/${BBFDM_BASE_DM_PATH}/scripts
install_bin ${SRC} ${DEST}/${BBFDM_BASE_DM_PATH}/scripts/
fi
exit 0
fi
if [ "${INPUT_FILE}" -eq "1" ]; then
tempfile=""
if [ ! -f "${SRC}" ]; then
echo "# Datamodel Input file ${SRC} not available"
exit 1
fi
if ! cat ${SRC} |jq >/dev/null 2>&1; then
echo "# Invalid datamodel json input file"
exit 1
fi
service_name="$(cat ${SRC}|jq -r '.daemon.service_name')"
if [ -z "${service_name}" ]; then
echo "# service_name not defined in service json ...."
exit 1
fi
tempfile=$(mktemp)
cp ${SRC} ${tempfile}
if [ -n "${VENDOR_EXTN}" ]; then
sed -i "s/{BBF_VENDOR_PREFIX}/${VENDOR_EXTN}/g" ${tempfile}
fi
install_dir ${DEST}/etc/bbfdm/services
install_data ${tempfile} ${DEST}/etc/bbfdm/services/${service_name}.json
if [ -f "${tempfile}" ]; then
rm ${tempfile}
fi
exit 0
fi
if [ "${MICRO_SERVICE}" -eq "1" ]; then
if [ -z "${DATA}" ]; then
echo "# service_name[${DATA}] not provided"
exit 1
fi
if [ "${PLUGIN}" -ne "1" ]; then
extn="$(basename ${SRC})"
install_dir ${DEST}/${BBFDM_BASE_DM_PATH}/micro_services
bbfdm_install_dm ${SRC} ${DEST}/${BBFDM_BASE_DM_PATH}/micro_services/${DATA}.${extn##*.}
else
install_dir ${DEST}/${BBFDM_BASE_DM_PATH}/micro_services/${DATA}
bbfdm_install_dm ${SRC} ${DEST}/${BBFDM_BASE_DM_PATH}/micro_services/${DATA}/$(printf "%02d" ${EXTRA_DATA})$(basename ${SRC})
fi
else
if [ "${PLUGIN}" -eq "1" ]; then
echo "# WARNING: BBFDM_INSTALL_CORE_PLUGIN macro will be deprecated soon. Please use BBFDM_INSTALL_MS_PLUGIN macro instead, specifying 'core' as micro-service name #"
priority="${DATA:-0}"
install_dir ${DEST}/${BBFDM_BASE_DM_PATH}/micro_services/core
if [ "${priority}" -gt "0" ]; then
# install with priority if defined
bbfdm_install_dm ${SRC} ${DEST}/${BBFDM_BASE_DM_PATH}/micro_services/core/${priority}_$(basename ${SRC})
elif [ "${priority}" -eq "0" ]; then
bbfdm_install_dm ${SRC} ${DEST}/${BBFDM_BASE_DM_PATH}/micro_services/core/$(basename ${SRC})
else
echo "# Priority should be an unsigned integer"
exit 1
fi
fi
fi


@@ -1,333 +0,0 @@
#!/usr/bin/python3
# Copyright (C) 2024 iopsys Software Solutions AB
# Author: Amin Ben Romdhane <amin.benromdhane@iopsys.eu>
import sys
import json
from jsonschema import validate
JSON_PLUGIN_VERSION = 0
obj_schema = {
"definitions": {
"type_t": {
"type": "string",
"enum": [
"object"
]
},
"map_type_t": {
"type": "string",
"enum": [
"uci",
"ubus"
]
},
"protocols_t": {
"type": "string",
"enum": [
"none",
"cwmp",
"usp"
]
}
},
"type" : "object",
"properties" : {
"type" : {"$ref": "#/definitions/type_t"},
"version" : {"type": "string"},
"protocols" : {"type" : "array", "items" : {"$ref": "#/definitions/protocols_t"}},
"uniqueKeys" : {"type" : "array"},
"access" : {"type" : "boolean"},
"array" : {"type" : "boolean"},
"mapping" : {"type" : "object", "properties" : {
"type" : {"$ref": "#/definitions/map_type_t"},
"uci" : {"type" : "object", "properties" : {
"file" : {"type": "string"},
"section" : {"type": "object", "properties" : {
"type" : {"type": "string"}
}
},
"dmmapfile" : {"type": "string"}
}
},
"ubus" : {"type" : "object", "properties" : {
"object" : {"type": "string"},
"method" : {"type": "string"},
"args" : {"type": "object"},
"key" : {"type": "string"}
}
}
}
}
},
"required": [
"type",
"protocols",
"array",
"access"
]
}
obj_schema_v1 = {
"definitions": {
"type_t": {
"type": "string",
"enum": [
"object"
]
},
"map_type_t": {
"type": "string",
"enum": [
"uci",
"ubus"
]
},
"protocols_t": {
"type": "string",
"enum": [
"cwmp",
"usp",
"none"
]
}
},
"type" : "object",
"properties" : {
"type" : {"$ref": "#/definitions/type_t"},
"version" : {"type": "string"},
"protocols" : {"type" : "array", "items" : {"$ref": "#/definitions/protocols_t"}},
"uniqueKeys" : {"type" : "array"},
"access" : {"type" : "boolean"},
"array" : {"type" : "boolean"},
"mapping" : {"type" : "array", "items" : {
"type" : "object", "properties" : {
"type" : {"$ref": "#/definitions/map_type_t"},
"uci" : {"type" : "object", "properties" : {
"file" : {"type": "string"},
"section" : {"type": "object", "properties" : {
"type" : {"type": "string"}
}
},
"dmmapfile" : {"type": "string"}
}
},
"ubus" : {"type" : "object", "properties" : {
"object" : {"type": "string"},
"method" : {"type": "string"},
"args" : {"type": "object"},
"key" : {"type": "string"}
}
}
}
}
}
},
"required": [
"type",
"protocols",
"array",
"access"
]
}
param_schema = {
"definitions": {
"type_t": {
"type": "string",
"enum": [
"string",
"unsignedInt",
"unsignedLong",
"int",
"long",
"boolean",
"dateTime",
"hexBinary",
"base64",
"decimal"
]
},
"map_type_t": {
"type": "string",
"enum": [
"uci",
"ubus",
"procfs",
"sysfs",
"json",
"uci_sec"
]
},
"protocols_t": {
"type": "string",
"enum": [
"cwmp",
"usp",
"none"
]
}
},
"type" : "object",
"properties" : {
"type" : {"$ref": "#/definitions/type_t"},
"protocols" : {"type" : "array", "items" : {"$ref": "#/definitions/protocols_t"}},
"read" : {"type" : "boolean"},
"write" : {"type" : "boolean"},
"mapping" : {"type" : "array", "items" : {"type": "object", "properties" : {
"type" : {"$ref": "#/definitions/map_type_t"},
"uci" : {"type" : "object", "properties" : {
"file" : {"type": "string"},
"section" : {"type": "object", "properties" : {
"type" : {"type": "string"},
"index" : {"type": "string"}
}
},
"option" : {"type": "object", "properties" : {
"name" : {"type": "string"} }
}
}
},
"ubus" : {"type" : "object", "properties" : {
"object" : {"type": "string"},
"method" : {"type": "string"},
"args" : {"type": "object"},
"key" : {"type": "string"}
}
},
"procfs" : {"type" : "object", "properties" : {
"file" : {"type": "string"}
}
},
"sysfs" : {"type" : "object", "properties" : {
"file" : {"type": "string"}
}
}
}
}
}
},
"required": [
"type",
"protocols",
"read",
"write"
]
}
event_schema = {
"definitions": {
"type_t": {
"type": "string",
"enum": [
"event"
]
},
"protocols_t": {
"type": "string",
"enum": [
"none",
"usp"
]
}
},
"type" : "object",
"properties" : {
"type" : {"$ref": "#/definitions/type_t"},
"version" : {"type": "string"},
"protocols" : {"type" : "array", "items" : {"$ref": "#/definitions/protocols_t"}}
},
"required": [
"type",
"protocols"
]
}
command_schema = {
"definitions": {
"type_t": {
"type": "string",
"enum": [
"command"
]
},
"protocols_t": {
"type": "string",
"enum": [
"none",
"usp"
]
}
},
"type" : "object",
"properties" : {
"type" : {"$ref": "#/definitions/type_t"},
"async" : {"type" : "boolean"},
"protocols" : {"type" : "array", "items" : {"$ref": "#/definitions/protocols_t"}},
"input" : {"type" : "object"},
"output" : {"type" : "object"}
},
"required": [
"type",
"async",
"protocols"
]
}
def print_validate_json_usage():
print("Usage: " + sys.argv[0] + " <dm json file>")
print("Examples:")
print(" - " + sys.argv[0] + " datamodel.json")
print(" ==> Validate the json file")
print("")
exit(1)
def parse_value( key , value ):
if key.endswith('.') and not key.startswith('Device.'):
print(key + " is not a valid path")
exit(1)
if key.endswith('.') and (JSON_PLUGIN_VERSION == 1 or JSON_PLUGIN_VERSION == 2):
__schema = obj_schema_v1
elif key.endswith('.'):
__schema = obj_schema
elif key.endswith('!'):
__schema = event_schema
elif key.endswith('()'):
__schema = command_schema
else:
__schema = param_schema
validate(instance = value, schema = __schema)
for k, v in value.items():
if k != "list" and k != "mapping" and k != "input" and k != "output" and isinstance(v, dict):
parse_value(k, v)
### main ###
if len(sys.argv) < 2:
print_validate_json_usage()
json_file = open(sys.argv[1], "r", encoding='utf-8')
try:
json_data = json.loads(json_file.read())
except ValueError:
print(sys.argv[1] + " file has a wrong JSON format!!!!!")
exit(1)
for __key, __value in json_data.items():
if __key == "json_plugin_version":
if not isinstance(__value, int) or __value not in [0, 1, 2]:
raise ValueError("Invalid value for json_plugin_version")
JSON_PLUGIN_VERSION = __value
continue
parse_value(__key , __value)
print("JSON File is Valid")


@@ -1,45 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2025 IOPSYS Software Solutions AB
include $(TOPDIR)/rules.mk
PKG_NAME:=blkpg-part
PKG_VERSION:=1
PKG_RELEASE:=1
PKG_SOURCE_VERSION:=5a4ec5f53ed904b37fba03f3797fbe2af3077f8d
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/gportay/blkpg-part.git
PKG_MIRROR_HASH:=skip
PKG_MAINTAINER:=Andreas Gnau <andreas.gnau@iopsys.eu>
PKG_LICENSE:=LGPL-2.1-or-later
PKG_LICENSE_FILES:=LICENSE
PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
MAKE_INSTALL_FLAGS += PREFIX=/usr
define Package/$(PKG_NAME)
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=Disc
TITLE:=User space partition table and disk geometry handling utility
URL:=https://www.portay.io/blkpg-part/
endef
define Package/$(PKG_NAME)/description
blkpg-part creates temporary partitions that are not part of the GPT/MBR.
It makes a partition block device from any consecutive blocks that are
not partitioned. It creates, resizes and deletes partitions on the fly
without writing back the changes to the partition table.
endef
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/sbin/blkpg-part $(1)/usr/sbin/
endef
$(eval $(call BuildPackage,$(PKG_NAME)))


@@ -1,40 +1,40 @@
#
# Copyright (C) 2022 iopsys Software Solutions AB
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=bulkdata
PKG_VERSION:=2.1.20
PKG_VERSION:=2.0.3
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/bbf/bulkdata.git
PKG_SOURCE_VERSION:=a5e57962938ca143ede65d92be90b6e9fce66e15
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/bulkdata.git
PKG_SOURCE_VERSION:=85d7486a21a9eb0e0f345b587b3ac506edcb72fe
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
PKG_LICENSE:=BSD-3-Clause
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_LICENSE:=GPL-2.0-only
PKG_LICENSE_FILES:=LICENSE
include $(INCLUDE_DIR)/package.mk
include $(TOPDIR)/feeds/iopsys/bbfdm/bbfdm.mk
PKG_BUILD_DEPENDS:=bbf
define Package/bulkdata
include $(INCLUDE_DIR)/package.mk
define Package/$(PKG_NAME)
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=BBF BulkData Collection
DEPENDS:=+ubus +libuci +libubox +libjson-c +libcurl +libblobmsg-json +zlib
DEPENDS+=+libbbfdm-api +libbbfdm-ubus
endef
define Package/bulkdata/description
BulkData daemon for TR069 with bbfdm backend.
DEPENDS:=+libubus +libuci +libubox +libjson-c +libcurl +libblobmsg-json
endef
TARGET_CFLAGS += \
@@ -47,15 +47,12 @@ define Build/Prepare
endef
endif
define Package/bulkdata/install
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) $(PKG_BUILD_DIR)/bulkdatad $(1)/usr/sbin/
$(INSTALL_DATA) ./files/etc/config/bulkdata $(1)/etc/config/
$(INSTALL_BIN) ./files/etc/init.d/bulkdatad $(1)/etc/init.d/
$(BBFDM_REGISTER_SERVICES) ./bbfdm_service.json $(1) $(PKG_NAME)
$(INSTALL_DIR) $(1)/usr/lib/bbfdm
$(INSTALL_BIN) $(PKG_BUILD_DIR)/bbf_plugin/*.so $(1)/usr/lib/bbfdm
$(CP) ./files/* $(1)/
endef
$(eval $(call BuildPackage,bulkdata))
$(eval $(call BuildPackage,$(PKG_NAME)))


@@ -1,17 +0,0 @@
{
"daemon": {
"enable": "1",
"service_name": "bulkdata",
"unified_daemon": true,
"proto": "cwmp",
"services": [
{
"parent_dm": "Device.",
"object": "BulkData"
}
],
"config": {
"loglevel": "3"
}
}
}


@@ -1,6 +1,6 @@
config bulkdata 'bulkdata'
option enable '0'
#Log levels: As per syslog 0-7, default 6=>LOG_INFO
option loglevel '3'
#Log levels: Error=1, Warning=2, Info=3, Debug=4
option log_level '3'


@@ -1,41 +1,34 @@
#!/bin/sh /etc/rc.common
START=60
START=99
STOP=10
USE_PROCD=1
PROG="/usr/sbin/bulkdatad"
validate_global_section() {
uci_validate_section bulkdata bulkdata bulkdata \
'enable:bool:1' \
'loglevel:uinteger:3'
start_service() {
local enable=$(uci -q get bulkdata.bulkdata.enable)
[ "$enable" != "1" ] && {
return 0
}
procd_open_instance
procd_set_param command "$PROG"
procd_set_param respawn "3" "7" "0"
procd_close_instance
}
start_service() {
local enable loglevel
validate_global_section
procd_open_instance "bulkdata"
procd_set_param command "$PROG"
procd_append_param command -l ${loglevel}
procd_set_param respawn
procd_close_instance "bulkdata"
boot() {
start
}
reload_service() {
ret=$(ubus call service list '{"name":"bulkdatad"}' | jsonfilter -qe '@.bulkdatad.instances.bulkdata.running')
if [ "$ret" != "true" ]; then
stop
start
else
ubus send bulkdata.reload
fi
return 0
stop
start
}
service_triggers() {
service_triggers()
{
procd_add_reload_trigger bulkdata
}


@@ -0,0 +1,75 @@
#!/bin/sh
. /lib/functions.sh
PROFILE_COUNT=1
get_next_count()
{
local config="$1"
local default_name="${2}"
local count=${3}
local found=0
if [ -z "$count" ]; then
count=1
fi
while [ "${found}" -ne 1 ]; do
uci -q get ${config}.${default_name}_${count} >/dev/null
if [ $? -eq 0 ]; then
count=$((count + 1))
else
found=1;
fi
done
echo "${default_name}_${count}"
}
translate_profile_id_to_profile_name() {
local section="${1}"
local profile_id="${2}"
local profile_name="${3}"
local curr_profile_id
config_get curr_profile_id "${section}" profile_id
[ -n "${curr_profile_id}" ] || return
[ "${curr_profile_id}" != "${profile_id}" ] && return
uci -q set bulkdata.${section}.profile_name="${profile_name}"
uci -q set bulkdata.${section}.profile_id=""
}
update_profile_sections() {
local section="${1}"
local default="${2}"
local profile_name
config_get profile_id "${section}" profile_id
[ -n "${profile_id}" ] || return
case "${section}" in
"cfg"*)
profile_name="$(get_next_count bulkdata ${default} ${PROFILE_COUNT})"
uci_rename bulkdata "${section}" "${profile_name}"
;;
esac
PROFILE_COUNT=$((PROFILE_COUNT + 1))
[ -n "$profile_name" ] && section="${profile_name}"
uci -q set bulkdata.${section}.profile_id=""
config_foreach translate_profile_id_to_profile_name profile_parameter "${profile_id}" "${profile_name}"
config_foreach translate_profile_id_to_profile_name profile_http_request_uri_parameter "${profile_id}" "${profile_name}"
}
config_load bulkdata
config_foreach update_profile_sections profile profile
uci commit bulkdata
exit 0


@@ -1,35 +0,0 @@
#!/bin/sh /etc/rc.common
START=98
STOP=20
USE_PROCD=1
start_service() {
if [ -e "/etc/config/mapagent" -o -e "/etc/config/mapcontroller" ]; then
config_load mapagent
config_get_bool agent_enabled agent enabled 1
config_load mapcontroller
config_get_bool controller_enabled controller enabled 1
if [ $agent_enabled -eq 1 -o $controller_enabled -eq 1 ]; then
procd_open_instance
procd_set_param command "/usr/sbin/capiagent" "-p 9000"
procd_set_param limits core="unlimited"
#procd_set_param respawn
#procd_set_param stdout 1
#procd_set_param stderr 1
procd_close_instance
fi
fi
}
service_triggers()
{
procd_add_reload_trigger "capiagent"
}
reload_service() {
stop
start
}


@@ -1,52 +0,0 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=cmph
PKG_VERSION:=2.0.2
PKG_RELEASE:=1
PKG_SOURCE:=cmph-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=@SF/cmph/
PKG_MD5SUM:=51ec5329b47774d251a96eaaafdb409e
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_INSTALL_DIR:=$(PKG_BUILD_DIR)/ipkg-install
PKG_FIXUP:=autoreconf
PKG_LICENSE:=LGPLv2
PKG_LICENSE_FILES:=LGPL-2
include $(INCLUDE_DIR)/package.mk
define Package/$(PKG_NAME)
SECTION:=libs
CATEGORY:=Libraries
TITLE:=C Minimal Perfect Hashing library
URL:=https://sourceforge.net/projects/cmph/
endef
define Package/$(PKG_NAME)/description
C Minimal Perfect Hashing (CMPH) library allows the creation of minimal perfect hash functions for large data sets.
endef
define Build/Configure
$(call Build/Configure/Default)
endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include
$(INSTALL_DATA) $(PKG_BUILD_DIR)/src/cmph.h $(1)/usr/include/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/src/cmph_time.h $(1)/usr/include/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/src/cmph_types.h $(1)/usr/include/
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/src/.libs/libcmph.a $(1)/usr/lib/
$(CP) $(PKG_BUILD_DIR)/src/.libs/libcmph.so* $(1)/usr/lib/
endef
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/src/.libs/libcmph.so* $(1)/usr/lib/
endef
$(eval $(call BuildPackage,$(PKG_NAME)))


@@ -1,64 +0,0 @@
#
# Copyright (C) 2020-2024 IOPSYS Software Solutions AB
#
include $(TOPDIR)/rules.mk
PKG_NAME:=csmngr
PKG_VERSION:=1.0.2
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=5e50fe388fff29b08d895c1c580152cccfa290ad
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/csmngr.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)_$(PKG_SOURCE_VERSION).tar.xz
PKG_MIRROR_HASH:=skip
endif
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_MAINTAINER:=Anjan Chanda <anjan.chanda@iopsys.eu>
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
PKG_CONFIG_DEPENDS := \
CONFIG_PACKAGE_libwifiutils \
CONFIG_PACKAGE_libwifi
PKG_BUILD_DEPENDS := libwifi
include $(INCLUDE_DIR)/package.mk
MAKE_PATH:=src
define Package/csmngr
SECTION:=utils
CATEGORY:=Utilities
TITLE:=WiFi channel selection manager
DEPENDS:=+libwifiutils +libwifi +libuci +libubox +ubus +libnl-genl
endef
define Package/csmngr/description
WiFi Auto Channel Selection manager.
endef
TARGET_CFLAGS += \
-I$(STAGING_DIR)/usr/include \
-I$(STAGING_DIR)/usr/include/libnl3 \
-D_GNU_SOURCE
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
rsync -r --exclude=.* ~/git/csmngr/ $(PKG_BUILD_DIR)/
endef
endif
define Package/csmngr/install
$(CP) ./files/* $(1)/
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/src/csmngr $(1)/usr/sbin/
endef
$(eval $(call BuildPackage,csmngr))


@@ -1,16 +0,0 @@
if PACKAGE_ddnsmngr
choice
prompt "Select backend for dynamic DNS management"
default DDNSMNGR_BACKEND_DDNSSCRIPT
depends on PACKAGE_ddnsmngr
help
Select which package to use for dynamic DNS support
config DDNSMNGR_BACKEND_DDNSSCRIPT
bool "Use ddns_script"
config DDNSMNGR_BACKEND_INADYN
bool "Use inadyn"
endchoice
endif


@@ -1,79 +0,0 @@
#
# Copyright (C) 2024 IOPSYS
#
include $(TOPDIR)/rules.mk
PKG_NAME:=ddnsmngr
PKG_VERSION:=1.0.12
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/ddnsmngr.git
PKG_SOURCE_VERSION:=44af9a7b3fec3929f8554af9633a5b8068189b48
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
PKG_CONFIG_DEPENDS:=CONFIG_DDNSMNGR_BACKEND_DDNSSCRIPT CONFIG_DDNSMNGR_BACKEND_INADYN
include $(INCLUDE_DIR)/package.mk
include $(TOPDIR)/feeds/iopsys/bbfdm/bbfdm.mk
define Package/$(PKG_NAME)
SECTION:=utils
CATEGORY:=Utilities
SUBMENU:=TRx69
TITLE:=Dynamic DNS manager
DEPENDS:=+DDNSMNGR_BACKEND_DDNSSCRIPT:ddns-scripts +DDNSMNGR_BACKEND_INADYN:inadyn
DEPENDS+=+libbbfdm-api +libbbfdm-ubus +dm-service
MENU:=1
endef
define Package/$(PKG_NAME)/config
source "$(SOURCE)/Config.in"
endef
MAKE_PATH:=src
define Package/$(PKG_NAME)/description
Manages dynamic DNS updates and provides the Device.DynamicDNS. data model object based on TR181-2.16
endef
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ~/git/ddnsmngr/* $(PKG_BUILD_DIR)/
endef
endif
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/etc/ddnsmngr/ddns
$(INSTALL_DIR) $(1)/etc/ddnsmngr/servers
$(INSTALL_DIR) $(1)/usr/lib/ddnsmngr
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_DIR) $(1)/usr/libexec/rpcd
$(INSTALL_DATA) ./files/etc/config/ddnsmngr $(1)/etc/config/ddnsmngr
$(INSTALL_BIN) ./files/etc/uci-defaults/01-ddns-config-migrate $(1)/etc/uci-defaults/
$(INSTALL_BIN) ./files/etc/init.d/ddnsmngr $(1)/etc/init.d/ddnsmngr
ifeq ($(CONFIG_DDNSMNGR_BACKEND_DDNSSCRIPT),y)
$(INSTALL_BIN) ./files/usr/lib/ddns_script/ddnsmngr_service.sh $(1)/usr/lib/ddnsmngr/
$(INSTALL_BIN) $(PKG_BUILD_DIR)/files/ddns-script/usr/lib/ddnsmngr/ddnsmngr_updater.sh $(1)/usr/lib/ddnsmngr/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/files/ddns-script/server/* $(1)/etc/ddnsmngr/servers
$(INSTALL_BIN) $(PKG_BUILD_DIR)/files/ddns-script/usr/libexec/rpcd/ddnsmngr $(1)/usr/libexec/rpcd/ddnsmngr
endif
ifeq ($(CONFIG_DDNSMNGR_BACKEND_INADYN),y)
$(INSTALL_BIN) ./files/usr/lib/inadyn/ddnsmngr_service.sh $(1)/usr/lib/ddnsmngr/
$(INSTALL_DATA) $(PKG_BUILD_DIR)/files/inadyn/server/* $(1)/etc/ddnsmngr/servers
$(INSTALL_BIN) $(PKG_BUILD_DIR)/files/inadyn/usr/libexec/rpcd/ddnsmngr $(1)/usr/libexec/rpcd/ddnsmngr
endif
$(BBFDM_REGISTER_SERVICES) ./bbfdm_service.json $(1) $(PKG_NAME)
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/src/libddnsmngr.so $(1) $(PKG_NAME)
endef
$(eval $(call BuildPackage,$(PKG_NAME)))


@@ -1,16 +0,0 @@
{
"daemon": {
"enable": "1",
"service_name": "ddnsmngr",
"unified_daemon": false,
"services": [
{
"parent_dm": "Device.",
"object": "DynamicDNS"
}
],
"config": {
"loglevel": "3"
}
}
}


@@ -1,23 +0,0 @@
config ddnsmngr 'global'
option configfile '/var/run/ddnsmngr/ddnsmngr.json'
option ddns_dateformat '%F %R'
option ddns_rundir '/var/run/ddnsmngr'
option ddns_logdir '/var/log/ddnsmngr'
option ddns_loglines '250'
option upd_privateip '0'
option use_curl '1'
config server 'ddns_server_1'
option enabled '1'
option service 'dynu.com'
option name 'dynu.com'
config server 'ddns_server_2'
option enabled '1'
option service 'dyndns.org'
option name 'dyndns.org'
config server 'ddns_server_3'
option enabled '1'
option service 'zoneedit.com'
option name 'zoneedit.com'


@@ -1,26 +0,0 @@
#!/bin/sh /etc/rc.common
START=80
STOP=10
USE_PROCD=1
. /usr/lib/ddnsmngr/ddnsmngr_service.sh
start_service() {
start_ddnsmngr_service
}
stop_service() {
stop_ddnsmngr_service
}
reload_service() {
stop
sleep 1
start
}
service_triggers() {
procd_add_reload_trigger ddnsmngr
add_ddnsmngr_triggers
}


@@ -1,170 +0,0 @@
#!/bin/sh
. /lib/functions.sh
cl_id=1
srv_id=1
function get_ddns_config_option() {
local val
val="$(uci -q get ddns.${1}.${2})"
if [ -z "${val}" ] && [ -n "${3}" ]; then
val="${3}"
fi
echo "${val}"
}
function config_supported_service() {
if ! uci -q get ddnsmngr.global >/dev/null 2>&1; then
uci -q set ddnsmngr.global="ddnsmngr"
fi
servers=""
for i in $(find /etc/ddnsmngr/servers/ -name '*.json' | cut -d'/' -f 5 | sed "s/.json//")
do
if [ -z "${servers}" ]; then
servers="${i}"
else
servers="${servers},${i}"
fi
done
uci -q set ddnsmngr.global.supported_services="${servers}"
}
function migrate_service_section() {
client_sec=""
enabled="$(get_ddns_config_option ${1} enabled "0")"
service_name="$(get_ddns_config_option ${1} service_name)"
interface="$(get_ddns_config_option ${1} interface)"
ip_network="$(get_ddns_config_option ${1} ip_network)"
username="$(get_ddns_config_option ${1} username)"
password="$(get_ddns_config_option ${1} password)"
lookup_host="$(get_ddns_config_option ${1} lookup_host)"
use_ipv6="$(get_ddns_config_option ${1} use_ipv6 "0")"
force_ipversion="$(get_ddns_config_option ${1} force_ipversion "0")"
use_https="$(get_ddns_config_option ${1} use_https "0")"
force_dnstcp="$(get_ddns_config_option ${1} force_dnstcp "0")"
if [ -z "${service_name}" ]; then
uci -q delete ddns."${1}"
return 0
fi
# check server file is present in device
if [ ! -f "/etc/ddnsmngr/servers/${service_name}.json" ]; then
uci -q delete ddns."${1}"
return 0
fi
# Check if client section is already added for this service
clients=$(uci -q show ddnsmngr | grep "=client")
client_count=$(echo "${clients}" | wc -l)
tmp=0
while [ $tmp -lt $client_count ]
do
mngr_serv="$(uci -q get ddnsmngr.@client[$tmp].service_name)"
mngr_intf="$(uci -q get ddnsmngr.@client[$tmp].interface)"
mngr_netw="$(uci -q get ddnsmngr.@client[$tmp].ip_network)"
mngr_user="$(uci -q get ddnsmngr.@client[$tmp].username)"
mngr_ipv6="$(uci -q get ddnsmngr.@client[$tmp].use_ipv6)"
mngr_forceip="$(uci -q get ddnsmngr.@client[$tmp].force_ipversion)"
mngr_https="$(uci -q get ddnsmngr.@client[$tmp].use_https)"
mngr_dnstcp="$(uci -q get ddnsmngr.@client[$tmp].force_dnstcp)"
[ -z "${mngr_ipv6}" ] && mngr_ipv6="0"
[ -z "${mngr_forceip}" ] && mngr_forceip="0"
[ -z "${mngr_https}" ] && mngr_https="0"
[ -z "${mngr_dnstcp}" ] && mngr_dnstcp="0"
if [ "${mngr_serv}" == "${service_name}" ] && [ "${mngr_intf}" == "${interface}" ] && \
[ "${mngr_netw}" == "${ip_network}" ] && [ "${mngr_user}" == "${username}" ] && \
[ "${mngr_ipv6}" == "${use_ipv6}" ] && [ "${mngr_forceip}" == "${force_ipversion}" ] && \
[ "${mngr_https}" == "${use_https}" ] && [ "${mngr_dnstcp}" == "${force_dnstcp}" ]; then
break
fi
tmp=$(( tmp + 1 ))
done
if [ $tmp -ne $client_count ]; then
i=0
for client in $clients; do
if [ $i -eq $tmp ]; then
client_sec="$(echo $client | cut -d'=' -f 1 | cut -d'.' -f 2)"
break
fi
i=$(( i + 1 ))
done
if [ $enabled -eq 1 ]; then
uci -q set ddnsmngr."${client_sec}".enabled="1"
fi
else
client_sec=ddns_mig_client_"${cl_id}"
uci -q set ddnsmngr."${client_sec}"="client"
uci -q set ddnsmngr."${client_sec}".enabled="${enabled}"
uci -q set ddnsmngr."${client_sec}".service_name="${service_name}"
uci -q set ddnsmngr."${client_sec}".interface="${interface}"
uci -q set ddnsmngr."${client_sec}".ip_network="${ip_network}"
uci -q set ddnsmngr."${client_sec}".username="${username}"
uci -q set ddnsmngr."${client_sec}".password="${password}"
uci -q set ddnsmngr."${client_sec}".use_ipv6="${use_ipv6}"
uci -q set ddnsmngr."${client_sec}".force_ipversion="${force_ipversion}"
uci -q set ddnsmngr."${client_sec}".use_https="${use_https}"
uci -q set ddnsmngr."${client_sec}".force_dnstcp="${force_dnstcp}"
cl_id=$(( cl_id + 1 ))
# add server section if not added
servers=$(uci -q show ddnsmngr | grep "service=\'${service_name}\'")
if [ -z "${servers}" ]; then
server_sec=ddns_mig_server_"${srv_id}"
uci -q set ddnsmngr."${server_sec}"="server"
uci -q set ddnsmngr."${server_sec}".enabled="1"
uci -q set ddnsmngr."${server_sec}".service="${service_name}"
uci -q set ddnsmngr."${server_sec}".name="${service_name}"
srv_id=$(( srv_id + 1 ))
fi
fi
# if lookup_host is set then add host section
if [ -n "${lookup_host}" ] && [ -n "${client_sec}" ]; then
# check number of hosts present for this client
host_count=$(uci -q show ddnsmngr | grep "dm_parent=\'${client_sec}\'" | wc -l)
host_ix=$(( host_count + 1 ))
host_sec="${client_sec}"_host_"${host_ix}"
uci -q set ddnsmngr."${host_sec}"="host"
uci -q set ddnsmngr."${host_sec}".enabled="${enabled}"
uci -q set ddnsmngr."${host_sec}".lookup_host="${lookup_host}"
uci -q set ddnsmngr."${host_sec}".dm_parent="${client_sec}"
fi
uci -q delete ddns."${1}"
}
function migrate_ddns_config() {
if [ ! -f "/etc/config/ddnsmngr" ]; then
# Create ddnsmngr config file
touch /etc/config/ddnsmngr
fi
config_supported_service
config_load ddns
config_foreach migrate_service_section service
uci -q commit ddns
uci -q commit ddnsmngr
}
migrate_ddns_config


@@ -1,169 +0,0 @@
#! /bin/sh
RUNDIR="/var/run/ddnsmngr"
LOGDIR="/var/log/ddnsmngr"
PROG="/usr/lib/ddnsmngr/ddnsmngr_updater.sh"
CONFIGFILE="/var/run/ddnsmngr/ddnsmngr.json"
CLIENT_INTFS=""
. /usr/share/libubox/jshn.sh
log() {
echo "$*"|logger -t ddnsmngr.init -p debug
}
validate_host_section() {
uci_validate_section ddnsmngr host "${1}" \
'enabled:bool:0' \
'lookup_host:string' \
'dm_parent:string'
}
validate_client_section() {
uci_validate_section ddnsmngr client "${1}" \
'enabled:bool:0' \
'service_name:string' \
'interface:string' \
'ip_network:string' \
'username:string' \
'password:string' \
'use_https:bool:0' \
'force_dnstcp:bool:0' \
'use_ipv6:bool:0' \
'force_ipversion:bool:0'
}
add_object() {
local enabled lookup_host dm_parent use_ipv6 force_ipversion proc_info_file
local service_name interface ip_network username password use_https force_dnstcp
validate_host_section "${1}" || {
log "Validation of host section failed"
return 0
}
if [ "${enabled}" -ne 1 ] || [ -z "${dm_parent}" ]; then
return 0
fi
validate_client_section "${dm_parent}" || {
log "Validation of client section failed"
return 0
}
if [ "${enabled}" -ne 1 ]; then
return 0
fi
service_name=$(uci -q get ddnsmngr.${dm_parent}.service_name)
if [ -z "${service_name}" ]; then
return 0
fi
service_section=$(uci -q show ddnsmngr | grep "service=\'${service_name}\'" | cut -d'.' -f 2 | head -1)
if [ -z "${service_section}" ]; then
return 0
fi
service_enabled=$(uci -q get ddnsmngr.${service_section}.enabled)
if [ "${service_enabled}" -ne 1 ]; then
return 0
fi
json_add_object
json_add_string "interface" "${interface}"
json_add_string "service_name" "${service_name}"
json_add_string "username" "${username}"
json_add_string "password" "${password}"
json_add_string "lookup_host" "${lookup_host}"
json_add_string "ip_network" "${ip_network}"
json_add_string "proc_info_file" "${1}"
json_add_string "use_ipv6" "${use_ipv6}"
json_add_string "force_ipversion" "${force_ipversion}"
json_add_string "use_https" "${use_https}"
json_add_string "force_dnstcp" "${force_dnstcp}"
json_close_object
if [ -z "${interface}" ]; then
if [ "${use_ipv6}" -eq 0 ]; then
interface="wan"
else
interface="wan6"
fi
fi
for intf in $CLIENT_INTFS; do
if [ "${intf}" == "${interface}" ]; then
return 0
fi
done
CLIENT_INTFS="${CLIENT_INTFS} ${interface}"
}
start_ddnsmngr_service() {
run_dir=$(uci -q get ddnsmngr.global.ddns_rundir)
log_dir=$(uci -q get ddnsmngr.global.ddns_logdir)
if [ -n "${run_dir}" ]; then
RUNDIR="${run_dir}"
fi
if [ -n "${log_dir}" ]; then
LOGDIR="${log_dir}"
fi
mkdir -p "${RUNDIR}"
mkdir -p "${LOGDIR}"
conf_file=$(uci -q get ddnsmngr.global.configfile)
if [ -n "${conf_file}" ]; then
CONFIGFILE="${conf_file}"
fi
touch "${CONFIGFILE}"
if [ ! -f "${CONFIGFILE}" ]; then
log "Can not create ${CONFIGFILE}, exit"
exit 0
fi
json_init
json_add_array "services"
config_load ddnsmngr
config_foreach add_object host
json_close_array
json_dump > "${CONFIGFILE}"
procd_open_instance ddnsmngr
procd_set_param command "$PROG"
procd_append_param command -c "${CONFIGFILE}"
procd_append_param command -- start
procd_close_instance
}
stop_ddnsmngr_service() {
conf_file=$(uci -q get ddnsmngr.global.configfile)
if [ -n "${conf_file}" ]; then
CONFIGFILE="${conf_file}"
fi
if [ ! -f "${CONFIGFILE}" ]; then
log "${CONFIGFILE} not found, can't stop services if running any"
fi
"$PROG" -c "${CONFIGFILE}" -- stop
return 0
}
add_ddnsmngr_triggers() {
procd_open_trigger
for intf in $CLIENT_INTFS; do
# No need to handle other interface events such as ifupdate
procd_add_interface_trigger "interface.*.up" $intf /etc/init.d/ddnsmngr restart
procd_add_interface_trigger "interface.*.down" $intf /etc/init.d/ddnsmngr restart
done
procd_close_trigger
}
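For reference, the JSON that start_ddnsmngr_service() dumps into ${CONFIGFILE} has roughly this shape, one object per enabled host (all values below are illustrative):

{
	"services": [
		{
			"interface": "wan",
			"service_name": "dyndns.org",
			"username": "user",
			"password": "secret",
			"lookup_host": "myhost.dyndns.org",
			"ip_network": "wan",
			"proc_info_file": "ddns_mig_client_0_host_1",
			"use_ipv6": "0",
			"force_ipversion": "0",
			"use_https": "1",
			"force_dnstcp": "0"
		}
	]
}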

View File

@@ -1,311 +0,0 @@
#!/bin/sh
PROG="/usr/sbin/inadyn"
CONFIGPATH="/tmp/inadyn_config"
PIDPATH="/etc/inadyn_pid"
CLIENT_INTFS=""
CONFIG_FILES=""
SERVER_PATH="/etc/ddnsmngr/servers"
FORMAT="custom [SECTION] {\n\tusername\t= [USER]\n\tpassword\t= [PWD]\n\tddns-server\t= [SERV]\n\tddns-path\t= [URI]\n\tssl\t\t= [SSL]\n\thostname\t= [NAME]\n\tcheckip-command\t= [CMD]\n\tddns-response\t= [RESPONSES]\n}"
. /usr/share/libubox/jshn.sh
log() {
echo "$*"|logger -t ddnsmngr.init -p debug
}
get_service_data() {
local provider="$1"
shift
local dir="$1"
shift
local ipv6="$1"
shift
local name data url answer script
[ $# -ne 2 ] && {
return 1
}
[ -f "${dir}/${provider}.json" ] || {
eval "$1=\"\""
eval "$2=\"\""
return 1
}
json_load_file "${dir}/${provider}.json"
json_get_var name "name"
if [ "$ipv6" -eq "1" ]; then
json_select "ipv6"
else
json_select "ipv4"
fi
json_get_var data "url"
json_get_var answer "answer"
json_select ".."
json_cleanup
response=""
if [ -n "${answer}" ]; then
answer=$(echo "${answer}" | sed 's/|/ /g')
for ans in $answer; do
if [ -z "${response}" ]; then
response="${ans}"
else
response="${response}, ${ans}"
fi
done
response="{ ${response} }"
fi
eval "$1=\"$data\""
eval "$2=\"$response\""
return 0
}
generate_inadyn_config() {
json_load "${1}"
json_get_var service_name service_name
json_get_var use_ipv6 use_ipv6
json_get_var interface interface
json_get_var username username
json_get_var password password
json_get_var host lookup_host
json_get_var conf_file config_file
json_get_var conf_dir config_dir
json_get_var server_address server_address
json_cleanup
if [ -z "${service_name}" ] || [ -z "${host}" ]; then
return 1
fi
if [ -z "${conf_file}" ]; then
return 1
fi
if [ -z "${conf_dir}" ]; then
return 1
fi
# First look into the custom path to load the URL, otherwise the default path
get_service_data "${service_name}" "${SERVER_PATH}" "${use_ipv6}" server_url server_answer
if [ -z "${server_url}" ]; then
return 1
fi
# Pick the protocol, server address and request URI separately from the URL
# format: <proto>://[server_address]/[update_request_uri]
proto=$(echo $server_url | cut -d':' -f 1)
serv=$(echo $server_url | cut -d'/' -f 3 | cut -d'@' -f 2)
uri=${server_url#*$serv}
if [ -z $proto ] || [ -z $serv ] || [ -z $uri ]; then
return 1
fi
path=$(echo "$uri" | sed 's/&/\\&/g')
update_uri=$(echo $path | sed -e "s#\[DOMAIN\]#%h#g" -e "s#\[PASSWORD\]#%p#g" \
-e "s#\[USERNAME\]#%u#g" -e "s#\[IP\]#%i#g")
if [ -z "${interface}" ]; then
if [ "${use_ipv6}" -eq 0 ]; then
interface="wan"
else
interface="wan6"
fi
fi
# now get the physical interface name
intf=$(ifstatus "${interface}" | jsonfilter -e '@.device')
if [ -z "${intf}" ]; then
return 1
fi
# command to get the IP address of the interface
if [ "${use_ipv6}" -eq 0 ]; then
get_ip="\"ifstatus ${interface} | jsonfilter -e '@[\\\\\"ipv4-address\\\\\"][0].address'\""
else
get_ip="\"ifstatus ${interface} | jsonfilter -e '@[\\\\\"ipv6-address\\\\\"][0].address'\""
fi
if [ "${proto}" = "http" ]; then
ssl="false"
else
ssl="true"
fi
inadyn_ver=$(inadyn -v)
user_agent="inadyn/${inadyn_ver}"
config_file="${conf_dir}/${conf_file}"
touch "${config_file}"
echo "iface = ${intf}" > "${config_file}"
echo "period = 600" >> "${config_file}"
echo "user-agent = ${user_agent}" >> "${config_file}"
if [ "${use_ipv6}" -eq 1 ]; then
echo "allow-ipv6 = true" >> "${config_file}"
fi
if [ -z "${password}" ]; then
FORMAT=$(echo "${FORMAT}" | sed 's/\\tpassword\\t= \[PWD\]\\n//g')
fi
if [ -z "${server_answer}" ]; then
FORMAT=$(echo "${FORMAT}" | sed 's/\\tddns-response\\t= \[RESPONSES\]\\n//g')
fi
config=$(echo $FORMAT | sed -e "s#\[SECTION\]#$conf_file#g" -e "s#\[PWD\]#$password#g" \
-e "s#\[USER\]#$username#g" -e "s#\[SERV\]#$serv#g" \
-e "s#\[URI\]#\"$update_uri\"#g" -e "s#\[SSL\]#$ssl#g" \
-e "s#\[NAME\]#$host#g" -e "s#\[CMD\]#$get_ip#g" \
-e "s#\[RESPONSES\]#$server_answer#g")
echo -e "\n\n${config}" >> "${config_file}"
return 0
}
validate_host_section() {
uci_validate_section ddnsmngr host "${1}" \
'enabled:bool:0' \
'lookup_host:string' \
'dm_parent:string'
}
validate_client_section() {
uci_validate_section ddnsmngr client "${1}" \
'enabled:bool:0' \
'service_name:string' \
'interface:string' \
'ip_network:string' \
'username:string' \
'password:string' \
'use_https:bool:0' \
'force_dnstcp:bool:0' \
'use_ipv6:bool:0' \
'force_ipversion:bool:0'
}
add_object() {
local enabled lookup_host dm_parent use_ipv6 force_ipversion
local service_name interface ip_network username password use_https force_dnstcp
validate_host_section "${1}" || {
log "Validation of host section failed"
return
}
if [ "${enabled}" -ne 1 ] || [ -z "${dm_parent}" ]; then
return
fi
validate_client_section "${dm_parent}" || {
log "Validation of client section failed"
return
}
if [ "${enabled}" -ne 1 ]; then
return
fi
service_name=$(uci -q get ddnsmngr.${dm_parent}.service_name)
if [ -z "${service_name}" ]; then
return
fi
service_section=$(uci show ddnsmngr | grep "service=\'${service_name}\'" | cut -d'.' -f 2 | head -1)
if [ -z "${service_section}" ]; then
return
fi
service_enabled=$(uci -q get ddnsmngr.${service_section}.enabled)
if [ "${service_enabled}" -ne 1 ]; then
return
fi
json_init
json_add_string "interface" "${interface}"
json_add_string "service_name" "${service_name}"
json_add_string "username" "${username}"
json_add_string "password" "${password}"
json_add_string "lookup_host" "${lookup_host}"
json_add_string "ip_network" "${ip_network}"
json_add_string "use_ipv6" "${use_ipv6}"
json_add_string "force_ipversion" "${force_ipversion}"
json_add_string "use_https" "${use_https}"
json_add_string "force_dnstcp" "${force_dnstcp}"
json_add_string "config_file" "${1}"
json_add_string "config_dir" "${CONFIGPATH}"
json_str=$(json_dump)
json_cleanup
generate_inadyn_config "${json_str}"
if [ "$?" -ne 0 ]; then
return
fi
CONFIG_FILES="${CONFIG_FILES} ${1}"
if [ -z "${interface}" ]; then
if [ "${use_ipv6}" -eq 0 ]; then
interface="wan"
else
interface="wan6"
fi
fi
for intf in $CLIENT_INTFS; do
if [ "${intf}" == "${interface}" ]; then
return
fi
done
CLIENT_INTFS="${CLIENT_INTFS} ${interface}"
}
start_ddnsmngr_service() {
rm -rf $CONFIGPATH
mkdir $CONFIGPATH
mkdir -p $PIDPATH
config_load ddnsmngr
config_foreach add_object host
i=1
for conf in $CONFIG_FILES; do
instance="ddnsmngr_${i}"
i=$(( i + 1 ))
procd_open_instance $instance
procd_set_param command "$PROG"
procd_append_param command -f "${CONFIGPATH}/${conf}"
procd_append_param command -l debug
procd_append_param command -P "${PIDPATH}/${conf}"
procd_append_param command -n -C
procd_close_instance
done
}
stop_ddnsmngr_service() {
rm -rf $CONFIGPATH
return 0
}
add_ddnsmngr_triggers() {
procd_open_trigger
for intf in $CLIENT_INTFS; do
# No need to handle other interface events such as ifupdate
procd_add_interface_trigger "interface.*.up" $intf /etc/init.d/ddnsmngr restart
done
procd_close_trigger
}
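For reference, the per-host config that generate_inadyn_config() writes follows the FORMAT template above and ends up roughly like this (all values illustrative; the allow-ipv6 line is only emitted when use_ipv6 is set):

iface = eth0
period = 600
user-agent = inadyn/2.12.0

custom ddns_mig_client_0_host_1 {
	username	= user
	password	= secret
	ddns-server	= members.dyndns.org
	ddns-path	= "/nic/update?hostname=%h&myip=%i"
	ssl		= true
	hostname	= myhost.dyndns.org
	checkip-command	= "ifstatus wan | jsonfilter -e '@[\"ipv4-address\"][0].address'"
	ddns-response	= { good, nochg }
}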

View File

@@ -1,7 +0,0 @@
menu "Configuration"
config DECOLLECTOR_EASYMESH_VERSION
int "Support Easymesh version"
default 6
endmenu

View File

@@ -1,81 +0,0 @@
#
# Copyright (C) 2021-2024 IOPSYS Software Solutions AB
# Copyright (C) 2025 Genexis AB
#
include $(TOPDIR)/rules.mk
PKG_NAME:=decollector
PKG_VERSION:=6.2.1.7
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=ca92325ece080389ffb405c95048b64071eda653
PKG_SOURCE_URL:=https://dev.iopsys.eu/multi-ap/decollector.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_MAINTAINER:=Anjan Chanda <anjan.chanda@genexis.eu>
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
include $(INCLUDE_DIR)/package.mk
define Package/decollector
SECTION:=utils
CATEGORY:=Utilities
TITLE:=WiFi DataElements Collector Proxy
DEPENDS:=+libuci +libubox +ubus +libpthread +libnl-genl \
+libeasy +libwifiutils +libieee1905 +ieee1905-map-plugin
endef
define Package/decollector/description
It implements the WiFi DataElements Agent as defined in the Wi-Fi Alliance's
DataElements specification.
endef
define Package/decollector/config
source "$(SOURCE)/Config.in"
endef
ifneq (,$(findstring ALPHA,$(CONFIG_VERSION_CODE)))
MAKE_FLAGS += DEBUG=1
endif
ifneq (,$(findstring BETA,$(CONFIG_VERSION_CODE)))
MAKE_FLAGS += DEBUG=1
endif
TARGET_CFLAGS += \
-I$(STAGING_DIR)/usr/include \
-I$(STAGING_DIR)/usr/include/libnl3 \
-D_GNU_SOURCE
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
rsync -r --exclude=.* ~/git/decollector/ $(PKG_BUILD_DIR)/
endef
endif
MAKE_PATH:=src
TARGET_CFLAGS += -DEASYMESH_VERSION=$(CONFIG_DECOLLECTOR_EASYMESH_VERSION)
EXECS := \
$(if $(CONFIG_PACKAGE_decollector),decollector)
MAKE_FLAGS += EXECS="$(EXECS)"
define Package/decollector/install
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) ./files/decollector.init $(1)/etc/init.d/decollector
$(INSTALL_BIN) $(PKG_BUILD_DIR)/src/decollector $(1)/usr/sbin/
endef
$(eval $(call BuildPackage,decollector))

View File

@@ -1,14 +1,20 @@
#
# Copyright (C) 2021 IOPSYS Software Solutions AB
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=dectmngr
PKG_RELEASE:=3
PKG_VERSION:=3.7.10
PKG_VERSION:=3.5.1
LOCAL_DEV=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_URL:=https://dev.iopsys.eu/iopsys/dectmngr.git
PKG_SOURCE_PROTO:=git
PKG_SOURCE_VERSION:=1f851980a6ba616df54f79930225f8bcd563b711
PKG_SOURCE_VERSION:=8c94908eb230dc453dd2f5575be6c06b086b2e7d
PKG_MIRROR_HASH:=skip
endif
@@ -28,7 +34,7 @@ include $(INCLUDE_DIR)/package.mk
define Package/$(PKG_NAME)
CATEGORY:=Utilities
TITLE:=DECT Manager
DEPENDS:= +libubox +ubus +uci +libxml2 +libjson-c +gpiod-tools +voicemngr
DEPENDS:= +libubox +ubus +uci +libxml2 +libjson-c
endef
define Package/$(PKG_NAME)/description
@@ -56,14 +62,11 @@ endif
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_DIR) $(1)/etc/dspg
$(INSTALL_DIR) $(1)/etc
$(INSTALL_DIR) $(1)/lib/upgrade/keep.d
$(INSTALL_BIN) $(PKG_BUILD_DIR)/app/dectmngr $(1)/usr/sbin/
$(STRIP) $(1)/usr/sbin/dectmngr
ifeq ($(CONFIG_TARGET_airoha),)
$(CP) ./firmware/common/* $(1)/etc/dspg/
endif
$(CP) ./files/etc/* $(1)/etc/
$(INSTALL_DATA) ./files/lib/upgrade/keep.d/dect $(1)/lib/upgrade/keep.d/dect
endef

View File

@@ -1,6 +1,3 @@
config dect 'global'
option log_dect_cmbs 'syslog'
option log_level 'realtime,warning,error'
config dect 'base'
option enable '1'
option log_dect_cmbs 'syslog'
option log_level 'realtime,warning,error'

Binary file not shown.

View File

@@ -6,124 +6,31 @@ START=70
STOP=12
USE_PROCD=1
NAME=dectmngr
PROG=/usr/sbin/dectmngr
LOG_PATH=/var/log/dectmngr
DB_PATH=/etc/dect
DCX81_UART_DT_ALIAS=/proc/device-tree/aliases/dcx81-uart
get_extension_shift() {
local dect_exts
get_dect_extension() {
local ext=$1
local type
config_get type $ext type
[ "$type" == "dect" ] && echo $ext
}
config_load "asterisk"
dect_exts=$(config_foreach get_dect_extension "extension" |sort |head -n1)
echo "${dect_exts#extension}"
}
# Ask dectmngr to exit nicely and wait for it to clean up, which is a slow process.
stop_and_wait_dectmngr() {
dect_pid=$(pidof $PROG)
[ -n "$dect_pid" ] && kill $dect_pid
pidof $NAME && killall -q $NAME
pidof $PROG > /dev/null 2>&1 && sleep 2 # wait for the process to stop gracefully
while pidof $PROG > /dev/null 2>&1; do
dect_pid=$(pidof $PROG)
[ -n "$dect_pid" ] && kill -9 $dect_pid
pidof $NAME && sleep 2 # wait for the process to stop gracefully
while pidof $NAME; do
killall -q -9 $NAME
sleep 1
done
}
has_dect() {
[ -f "$DCX81_UART_DT_ALIAS" ]
}
get_dcx81_device() {
readonly dcx81_uart_dt_node="/proc/device-tree/$(cat "$DCX81_UART_DT_ALIAS" 2>/dev/null)"
[ -e "$dcx81_uart_dt_node" ] || return 1
for tty_dt_node in /sys/class/tty/*/device/of_node; do
if [ "$tty_dt_node" -ef "$dcx81_uart_dt_node" ]; then
readonly uevent_file="${tty_dt_node%%/device/of_node}/uevent"
local device_name_line
device_name_line="$(grep '^DEVNAME=' "$uevent_file")" || return 1
readonly device="/dev/${device_name_line##DEVNAME=}"
[ -c "$device" ] || return 1
printf "%s" "$(basename $device)"
return 0
fi
done
return 1
}
check_dcx81_firmware() {
local dcx81_uart=$1
local fw_link="/lib/firmware/dcx81_firmware"
local fw_file
[ -L "$fw_link" ] || return
fw_file=$(readlink -f $fw_link)
[ -f "$fw_file" ] || return
# the symbolic link is no longer needed
rm -f $fw_link
eval $(/sbin/cmbs_tcx -comname "$dcx81_uart" -fw_version |grep DCX81_FW_Version)
[ -n "$DCX81_FW_Version" ] || return
if echo $(basename $fw_file) | grep -qi "$DCX81_FW_Version" ; then
logger -t "$PROG" "DCX81 running expected $DCX81_FW_Version"
return;
fi
logger -t "$PROG" "DCX81 firmware upgrading to $fw_file"
/sbin/cmbs_tcx -comname "$dcx81_uart" -fwu "$fw_file" 2>&1 >/dev/null &
echo -n "Updrading DCX81 firmware.." >/dev/console
local wait_time=0
while pidof cmbs_tcx >/dev/null && [ "$wait_time" -lt "200" ] ; do
sleep 5
wait_time=$(($wait_time + 5))
echo -n "." >/dev/console
done
if pidof cmbs_tcx >/dev/null ; then
killall -9 cmbs_tcx
logger -t "$PROG" "DCX81 firmware upgrade timeout"
else
logger -t "$PROG" "DCX81 firmware upgrade done"
fi
}
start_service() {
local opt_ext=
local rfpi=
local model_id=
local rxtun=
if ! has_dect; then
logger -t "$PROG" "Not starting because no DECT hardware is available."
return 0
fi
test $(db get hw.board.hasDect) = "0" && return
local dcx81_uart_device
if ! dcx81_uart_device="$(get_dcx81_device)"; then
logger -t "$PROG" -p daemon.warning "Could not determine DCX81 UART device. Falling back to default ttyH0."
dcx81_uart_device="ttyH0"
fi
check_dcx81_firmware $dcx81_uart_device
opt_ext="-extensionShift $(get_extension_shift)"
echo 1 > /sys/class/gpio/gpio14/value
rfpi=$(db -q get hw.board.dect_rfpi)
[ -n "$rfpi" -a ${#rfpi} -eq 14 ] && opt_ext="$opt_ext -rfpi $rfpi"
@@ -140,30 +47,20 @@ start_service() {
config_load dect
config_get log_dect_cmbs global log_dect_cmbs syslog
config_get pcm_slot_start global pcm_slot_start
config_get pcm_fsync global pcm_fsync
[ -n "$pcm_fsync" ] && opt_ext="$opt_ext -sync $pcm_fsync"
[ -n "$pcm_slot_start" ] && opt_ext="$opt_ext -slotsShift $pcm_slot_start"
procd_open_instance
case "$log_dect_cmbs" in
none)
if [ "$log_dect_cmbs" = "none" ]; then
echo "Starting dectmngr with cmbs logging disabled"
procd_set_param command "$PROG" -comname "$dcx81_uart_device" $opt_ext
procd_set_param command $PROG -comname ttyH0 $opt_ext
rm -f $LOG_PATH/*
;;
file)
elif [ "$log_dect_cmbs" = "file" ]; then
echo "Starting dectmngr with cmbs logging enabled to file"
procd_set_param command "$PROG" -comname "$dcx81_uart_device" -log $LOG_PATH/dect-cmbs.log $opt_ext
;;
*)
procd_set_param command $PROG -comname ttyH0 -log $LOG_PATH/dect-cmbs.log $opt_ext
else
echo "Starting dectmngr with cmbs logging enabled to syslog"
procd_set_param command "$PROG" -comname "$dcx81_uart_device" -syslog $opt_ext
procd_set_param command $PROG -comname ttyH0 -syslog $opt_ext
rm -f $LOG_PATH/*
;;
esac
fi
procd_set_param respawn 6 2 3
procd_set_param term_timeout 20
@@ -172,8 +69,9 @@ start_service() {
}
stop_service() {
has_dect || return 0
test $(db get hw.board.hasDect) = "0" && return
echo 0 > /sys/class/gpio/gpio14/value
stop_and_wait_dectmngr
}
@@ -181,13 +79,18 @@ reload_service() {
ubus call dect reload
}
service_triggers() {
service_triggers()
{
procd_add_config_trigger "config.change" "asterisk" /etc/init.d/dectmngr restart
procd_add_config_trigger "config.change" "dect" /etc/init.d/dectmngr reload
}
boot() {
echo 14 > /sys/class/gpio/export
echo out > /sys/class/gpio/gpio14/direction
[ ! -d $LOG_PATH ] && mkdir -p $LOG_PATH
[ ! -d $DB_PATH ] && mkdir -p $DB_PATH
start
}

View File

@@ -1,59 +0,0 @@
#
# Copyright (C) 2024 IOPSYS Software Solutions AB
#
include $(TOPDIR)/rules.mk
PKG_NAME:=dhcpmngr
PKG_VERSION:=1.0.6
LOCAL_DEV:=0
ifneq ($(LOCAL_DEV),1)
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://dev.iopsys.eu/network/dhcpmngr.git
PKG_SOURCE_VERSION:=986f66608959f4f589009d580b046e250d8c620d
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_MIRROR_HASH:=skip
endif
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
include $(INCLUDE_DIR)/package.mk
include ../bbfdm/bbfdm.mk
MAKE_PATH:=src
define Package/dhcpmngr
SECTION:=net
CATEGORY:=Network
TITLE:=Package to add Device.DHCPv4 and v6 data model support.
DEPENDS:=+libuci +libubox +libubus +libblobmsg-json +libjson-c
DEPENDS+=+libbbfdm-api +libbbfdm-ubus +dm-service
DEPENDS+=+DNSMNGR_DNS_SD:umdns
DEPENDS+=+DNSMNGR_BACKEND_DNSMASQ:dnsmasq
DEPENDS+=+DNSMNGR_BACKEND_UNBOUND:odhcpd
endef
define Package/dhcpmngr/description
Package to add Device.DHCPv4. and Device.DHCPv6. data model support.
endef
ifeq ($(LOCAL_DEV),1)
define Build/Prepare
$(CP) -rf ~/git/dhcpmngr/* $(PKG_BUILD_DIR)/
endef
endif
define Package/dhcpmngr/install
$(INSTALL_DIR) $(1)/etc/udhcpc.user.d
$(INSTALL_BIN) ./files/etc/udhcpc.user.d/udhcpc_lease_start_time.user $(1)/etc/udhcpc.user.d/udhcpc_lease_start_time.user
ifeq ($(CONFIG_DNSMNGR_BACKEND_UNBOUND),y)
$(INSTALL_DIR) $(1)/etc/uci-defaults
$(INSTALL_DATA) ./files/etc/uci-defaults/unbound.odhcpd.uci_default $(1)/etc/uci-defaults/16-set-unbound-as-odhcpd-leasetrigger
endif
$(BBFDM_REGISTER_SERVICES) ./bbfdm_service.json $(1) $(PKG_NAME)
$(BBFDM_INSTALL_MS_DM) $(PKG_BUILD_DIR)/src/libdhcpmngr.so $(1) $(PKG_NAME)
endef
$(eval $(call BuildPackage,dhcpmngr))

View File

@@ -1,20 +0,0 @@
{
"daemon": {
"enable": "1",
"service_name": "dhcpmngr",
"unified_daemon": false,
"services": [
{
"parent_dm": "Device.",
"object": "DHCPv4"
},
{
"parent_dm": "Device.",
"object": "DHCPv6"
}
],
"config": {
"loglevel": "3"
}
}
}

View File

@@ -1,17 +0,0 @@
#!/bin/sh
# update odhcpd uci to use unbound's script as leasetrigger
uci -q get dhcp.odhcpd >/dev/null 2>&1 && {
maindhcp="$(uci -q get dhcp.odhcpd.maindhcp)"
# if odhcpd is the main dhcp
[ "$maindhcp" = "1" ] || [ "$maindhcp" = "true" ] || [ "$maindhcp" = "on" ] && {
# if unbound daemon and unbound script file is present
[ -e /usr/lib/unbound/odhcpd.sh ] && [ -e /usr/sbin/unbound ] && {
# then set unbound script as leasetrigger in dhcp UCI
uci -q set dhcp.odhcpd.leasetrigger='/usr/lib/unbound/odhcpd.sh'
}
}
}
exit 0

View File

@@ -1,14 +0,0 @@
#!/bin/sh
leasestarttime="$(awk -F'.' '{print $1}' /proc/uptime 2> /dev/null)"
target_file=/tmp/dhcp_client_info
target_str="$INTERFACE $lease $leasestarttime"
# if this interface is already present in the file, replace its line
if grep -q "$INTERFACE" "$target_file" 2> /dev/null; then
# replace the whole line if pattern matches
sed -i "/${INTERFACE}/c\\${target_str}" "$target_file"
else
# interface info was not present, append it to the file
echo "$target_str" >> "$target_file"
fi
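Each line of /tmp/dhcp_client_info thus holds "<interface> <lease seconds as exported by udhcpc> <uptime at lease start>", for example (illustrative values):

wan 86400 1234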

Binary file not shown.

View File

@@ -1,76 +0,0 @@
#
# Copyright (c) 2023 Genexis B.V. All rights reserved.
# This Software and its content are protected by the Dutch Copyright Act
# ('Auteurswet'). All and any copying and distribution of the software
# and its content without authorization by Genexis B.V. is
# prohibited. The prohibition includes every form of reproduction and
# distribution.
#
#
include $(TOPDIR)/rules.mk
PKG_NAME:=datamodels
PKG_VERSION:=1.0.0
PKG_RELEASE:=1
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
include $(INCLUDE_DIR)/package.mk
define Package/$(PKG_NAME)
SECTION:=utils
CATEGORY:=Genexis
TITLE:=GeneOS Datamodel
URL:=http://www.genexis.eu
PKG_LICENSE:=GENEXIS
PKG_LICENSE_URL:=
endef
define Package/$(PKG_NAME)/description
This package contains the GeneOS datamodel.
endef
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) -rf ./src/* $(PKG_BUILD_DIR)/
cd $(PKG_BUILD_DIR); \
npm install better-sqlite3 quickjs && \
node ./scripts/json2code.js && \
node ./scripts/qjs-handlers-validate.js
endef
TARGET_CFLAGS += $(FPIC) -I$(PKG_BUILD_DIR)
define Build/Compile
$(MAKE) -C $(PKG_BUILD_DIR)\
PROJECT_ROOT="$(PKG_BUILD_DIR)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
ARCH="$(LINUX_KARCH)" \
EXTRA_CFLAGS="$(TARGET_CFLAGS)" \
all
endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/dm_types.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/dm_node.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/dm.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/libdm.so $(1)/usr/lib/
endef
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_DIR) $(1)/etc/bbfdm
$(INSTALL_DIR) $(1)/usr/lib/dmf_handlers
$(INSTALL_BIN) $(PKG_BUILD_DIR)/default.db $(1)/etc/bbfdm/default_dm.db
$(INSTALL_BIN) $(PKG_BUILD_DIR)/libdm.so $(1)/usr/lib/
# Copy only .js handler files recursively, preserving folder structure (skip hidden files/folders)
( cd $(PKG_BUILD_DIR)/dm-files; \
find . -type d -not -path './.*' -exec $(INSTALL_DIR) $(1)/usr/lib/dmf_handlers/{} \; ; \
find . -type f -name '*.js' -not -path './.*' -exec $(INSTALL_BIN) {} $(1)/usr/lib/dmf_handlers/{} \; )
endef
$(eval $(call BuildPackage,$(PKG_NAME)))

View File

@@ -1,41 +0,0 @@
#
# Copyright (c) 2023 Genexis B.V. All rights reserved.
# This Software and its content are protected by the Dutch Copyright Act
# ('Auteurswet'). All and any copying and distribution of the software
# and its content without authorization by Genexis B.V. is
# prohibited. The prohibition includes every form of reproduction and
# distribution.
#
#
PROG = libdm.so
SRCS = dm_node.c
# the following files are generated
SRCS += dm.c
OBJS = $(SRCS:.c=.o)
DEPS = $(SRCS:.c=.d)
CC = $(CROSS_COMPILE)gcc
STRIP = $(CROSS_COMPILE)strip
CFLAGS = -I$(STAGING_DIR)/usr/include $(EXTRA_CFLAGS)
CFLAGS += -MMD -MP
LDFLAGS = -shared
CFLAGS += -Wall -Werror -fpic
all: $(PROG)
$(PROG): $(OBJS)
$(CC) $^ $(LDFLAGS) -o $@
%.o: %.c
$(CC) $(CFLAGS) -c $^ -o $@
clean:
rm -f $(PROG) *.o core $(DEPS) dm.c dm.h
-include $(DEPS)

View File

@@ -1,443 +0,0 @@
[
{
"object": "Device.Bridging.",
"access": "readOnly",
"parameters": [
{
"name": "MaxBridgeEntries",
"access": "readOnly",
"dataType": "unsignedInt",
"const" : "4094"
},
{
"name": "MaxDBridgeEntries",
"access": "readOnly",
"dataType": "unsignedInt",
"const" : "4094"
},
{
"name": "MaxQBridgeEntries",
"access": "readOnly",
"dataType": "unsignedInt",
"const" : "4094"
},
{
"name": "MaxVLANEntries",
"access": "readOnly",
"dataType": "unsignedInt",
"const" : "4094"
},
{
"name": "BridgeNumberOfEntries",
"access": "readOnly",
"dataType": "unsignedInt"
}
]
},
{
"object": "Device.Bridging.Bridge.{i}.",
"uniqueKeys": "Name,Alias",
"access": "readWrite",
"uci": "network.device",
"parameters": [
{
"name": "Enable",
"access": "readWrite",
"dataType": "boolean",
"uci": "enabled",
"uci-default": "true"
},
{
"name": "Name",
"access": "readOnly",
"dataType": "string(:64)",
"set_on_create": "bridge_",
"db": true
},
{
"name": "Alias",
"access": "readWrite",
"dataType": "string(:64)"
},
{
"name": "Status",
"access": "readOnly",
"dataType": "enum",
"enum": [
"Disabled",
"Enabled",
"Error"
],
"default": "Disabled"
},
{
"name": "Standard",
"access": "readWrite",
"dataType": "enum",
"enum": [
"802.1D-2004",
"802.1Q-2005",
"802.1Q-2011"
],
"default": "802.1Q-2011"
},
{
"name": "PortNumberOfEntries",
"access": "readOnly",
"dataType": "unsignedInt"
},
{
"name": "VLANNumberOfEntries",
"access": "readOnly",
"dataType": "unsignedInt"
},
{
"name": "VLANPortNumberOfEntries",
"access": "readOnly",
"dataType": "unsignedInt"
}
]
},
{
"object": "Device.Bridging.Bridge.{i}.STP.",
"access": "readOnly",
"parameters": [
{
"name": "Enable",
"access": "readWrite",
"dataType": "boolean",
"uci": "stp"
},
{
"name": "Status",
"access": "readOnly",
"dataType": "enum",
"enum": [
"Disabled",
"Enabled",
"Error_Misconfigured",
"Error"
],
"default": "Disabled"
},
{
"name": "Protocol",
"access": "readWrite",
"dataType": "enum",
"enum": [
"STP",
"RSTP"
]
},
{
"name": "BridgePriority",
"access": "readWrite",
"dataType": "unsignedInt(0:61440)",
"default": "32768"
},
{
"name": "HelloTime",
"access": "readWrite",
"dataType": "unsignedInt(100:1000)",
"default": "200"
},
{
"name": "MaxAge",
"access": "readWrite",
"dataType": "unsignedInt(600:4000)",
"default": "2000"
},
{
"name": "ForwardingDelay",
"access": "readWrite",
"dataType": "unsignedInt(4:30)",
"default": "15"
}
]
},
{
"object": "Device.Bridging.Bridge.{i}.Port.{i}.",
"uniqueKeys": "Alias,Name",
"access": "readWrite",
"parameters": [
{
"name": "Enable",
"access": "readWrite",
"dataType": "boolean"
},
{
"name": "Status",
"access": "readOnly",
"dataType": "enum",
"enum": [
"Up",
"Down",
"Unknown",
"Dormant",
"NotPresent",
"LowerLayerDown",
"Error"
],
"default": "Down"
},
{
"name": "Alias",
"access": "readWrite",
"dataType": "string(:64)"
},
{
"name": "Name",
"access": "readOnly",
"dataType": "string(:64)",
"set_on_create": "port_",
"db": "true",
"flags": [
"linker"
],
"js-value": "ifname"
},
{
"name": "LastChange",
"access": "readOnly",
"dataType": "unsignedInt",
"const": "0"
},
{
"name": "LowerLayers",
"access": "readWrite",
"dataType": "pathRef[]",
"pathRef": [
"Device.Bridging.Bridge.{i}.Port."
],
"js-value": "ssidPath"
},
{
"name": "ManagementPort",
"access": "readWrite",
"dataType": "boolean"
},
{
"name": "PriorityRegeneration",
"access": "readWrite",
"dataType": "unsignedInt(0:7)[]",
"default": "0,1,2,3,4,5,6,7"
},
{
"name": "{BBF_VENDOR_PREFIX}EgressPriorityRegeneration",
"access": "readWrite",
"dataType": "unsignedInt(0:7)[]"
},
{
"name": "Type",
"access": "readWrite",
"dataType": "enum",
"enum": [
"ProviderNetworkPort",
"CustomerNetworkPort",
"CustomerEdgePort",
"CustomerVLANPort",
"VLANUnawarePort"
],
"default": "CustomerVLANPort"
},
{
"name": "PVID",
"access": "readWrite",
"dataType": "int(1:4094)",
"default": "1"
},
{
"name": "TPID",
"access": "readWrite",
"dataType": "unsignedInt",
"default": "33024"
}
]
},
{
"object": "Device.Bridging.Bridge.{i}.Port.{i}.Stats.",
"access": "readOnly",
"parameters": [
{
"name": "BytesSent",
"dataType": "unsignedLong"
},
{
"name": "BytesReceived",
"dataType": "unsignedLong"
},
{
"name": "PacketsSent",
"dataType": "unsignedLong"
},
{
"name": "PacketsReceived",
"dataType": "unsignedLong"
},
{
"name": "ErrorsSent",
"dataType": "StatsCounter32"
},
{
"name": "ErrorsReceived",
"dataType": "StatsCounter32"
},
{
"name": "UnicastPacketsSent",
"dataType": "unsignedLong"
},
{
"name": "DiscardPacketsSent",
"dataType": "StatsCounter32"
},
{
"name": "DiscardPacketsReceived",
"dataType": "StatsCounter32"
},
{
"name": "MulticastPacketsSent",
"dataType": "unsignedLong"
},
{
"name": "UnicastPacketsReceived",
"dataType": "unsignedLong"
},
{
"name": "MulticastPacketsReceived",
"dataType": "unsignedLong"
},
{
"name": "BroadcastPacketsSent",
"dataType": "unsignedLong"
},
{
"name": "BroadcastPacketsReceived",
"dataType": "unsignedLong"
},
{
"name": "UnknownProtoPacketsReceived",
"dataType": "StatsCounter32"
}
]
},
{
"object": "Device.Bridging.Bridge.{i}.VLAN.{i}.",
"uniqueKeys": "Alias,VLANID",
"access": "readWrite",
"parameters": [
{
"name": "Enable",
"access": "readWrite",
"dataType": "boolean"
},
{
"name": "Name",
"access": "readWrite",
"dataType": "string(:64)",
"set_on_create": "vlan_"
},
{
"name": "Alias",
"access": "readWrite",
"dataType": "string(:64)"
},
{
"name": "VLANID",
"access": "readWrite",
"dataType": "int(0:4094)"
}
]
},
{
"object": "Device.Bridging.Bridge.{i}.VLANPort.{i}.",
"uniqueKeys": "Alias,VLAN",
"access": "readWrite",
"parameters": [
{
"name": "Enable",
"access": "readWrite",
"dataType": "boolean"
},
{
"name": "Alias",
"access": "readWrite",
"dataType": "string(:64)"
},
{
"name": "VLAN",
"access": "readWrite",
"dataType": "pathRef",
"pathRef": [
"Device.Bridging.Bridge.{i}.VLAN."
]
},
{
"name": "Port",
"access": "readWrite",
"dataType": "pathRef",
"pathRef": [
"Device.Bridging.Bridge.{i}.Port."
]
},
{
"name": "Untagged",
"access": "readWrite",
"dataType": "boolean"
}
]
},
{
"object": "Device.Bridging.ProviderBridge.{i}.",
"uniqueKeys": "Alias",
"access": "readWrite",
"parameters": [
{
"name": "Enable",
"access": "readWrite",
"dataType": "boolean"
},
{
"name": "Status",
"access": "readOnly",
"dataType": "enum",
"enum": [
"Disabled",
"Enabled",
"Error_Misconfigured",
"Error"
],
"default": "Disabled"
},
{
"name": "Alias",
"access": "readWrite",
"dataType": "string(:64)"
},
{
"name": "Type",
"access": "readWrite",
"dataType": "enum",
"enum": [
"S-VLAN",
"PE"
]
},
{
"name": "SVLANcomponent",
"access": "readWrite",
"dataType": "pathRef",
"pathRef": [
"Device.Bridging.Bridge."
]
},
{
"name": "CVLANcomponents",
"access": "readWrite",
"dataType": "pathRef[]",
"pathRef": [
"Device.Bridging.Bridge."
]
}
]
}
]

View File

@@ -1,166 +0,0 @@
/*
* Copyright (c) 2025 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
import {
getUciOption, getUciByType, setUci, addUci, delUci
} from '../uci.js';
import * as dm from '../dm_consts.js';
import { getBridgeDeviceType } from './common.js';
function clearUnusedDevice(oldPorts, newPorts, devices) {
oldPorts?.forEach(port => {
if (port.includes('.') && !newPorts?.includes(port)) {
const dev = devices?.find(x => x.name === port);
if (dev?.['.name']) delUci('network', dev['.name']);
}
});
}
function applyBridge(bri, ports, VLANs, VLANPorts) {
const ifnames = [];
const devices = getUciByType('network', 'device')?.filter(x => x.type !== undefined);
const portsVal = getUciOption('network', bri._key, 'ports');
if (portsVal) delUci('network', bri._key, null, 'ports');
// get ports ethernet ifnames
for (const port of ports || []) {
if (port.ManagementPort || !port.LowerLayers.includes('Ethernet.Interface') || !port.Enable) {
continue;
}
let ifname = _dm_linker_value(port.LowerLayers);
if (!ifname) {
_log_error(`ifname not found for port: ${port.LowerLayers}`);
continue;
}
// check vlan
const portPath = `Device.Bridging.Bridge.${bri['.index']}.Port.${port['.index']}`;
const vp = VLANPorts?.find(x => x.Port === portPath);
if (!vp?.VLAN) {
ifnames.push(ifname);
continue;
}
// get index of the vlan
const [, indices] = _dm_node(vp.VLAN);
const vlanIdx = indices[indices.length - 1];
const vlan = VLANs?.find(x => x['.index'] === vlanIdx);
if (!vlan || vlan.VLANID <= 0) {
ifnames.push(ifname);
continue;
}
const eth = ifname;
ifname = `${ifname}.${vlan.VLANID}`;
const dev = devices?.find(x => x.name === ifname);
let devName;
if (dev) {
devName = dev['.name'];
} else {
devName = `br_${bri['.index']}_port_${vp['.index']}`;
addUci('network', 'device', devName, {
ifname: eth,
name: ifname,
vid: vlan.VLANID,
});
}
const uciConfigs = {};
// Handle Type parameter - determine device type based on port Type or default behavior
let deviceType = '';
if (port.Type) {
deviceType = getBridgeDeviceType(port.Type);
if (deviceType) uciConfigs.type = deviceType;
} else if (!vp.Untagged) {
uciConfigs.type = '8021q';
deviceType = '8021q';
}
// Handle TPID parameter
if (port.TPID) {
// If TPID is explicitly set, use it and derive device type if needed
uciConfigs.tpid = port.TPID;
// Set device type based on TPID if not already set
if (!deviceType) {
if (port.TPID === '33024') {
uciConfigs.type = '8021q';
} else if (port.TPID === '34984') {
uciConfigs.type = '8021ad';
}
}
}
uciConfigs.disabled = vlan.Enable && vp.Enable ? '0' : '1';
uciConfigs.ingress_qos_mapping = port.PriorityRegeneration !== '0,1,2,3,4,5,6,7'
? port.PriorityRegeneration.split(',').map((p, i) => `${i}:${p}`)
: '';
uciConfigs.egress_qos_mapping = port.X_IOPSYS_EU_EgressPriorityRegeneration !== ''
? port.X_IOPSYS_EU_EgressPriorityRegeneration.split(',').map((p, i) => `${i}:${p}`)
: '';
setUci('network', devName, uciConfigs);
ifnames.push(ifname);
}
clearUnusedDevice(portsVal, ifnames, devices);
if (ifnames.length > 0) {
setUci('network', bri._key, { ports: ifnames });
}
}
export function applyDeviceBridgingBridgePort(ports, bri) {
const vlans = _dm_get(dm.DM_DEVICE_BRIDGING_BRIDGE_VLAN, bri['.index']);
const vlanPorts = _dm_get(dm.DM_DEVICE_BRIDGING_BRIDGE_VLANPORT, bri['.index']);
applyBridge(bri, ports, vlans, vlanPorts);
}
export function applyDeviceBridgingBridgeVLAN(vlans, bri) {
const ports = _dm_get(dm.DM_DEVICE_BRIDGING_BRIDGE_PORT, bri['.index']);
const vlanPorts = _dm_get(dm.DM_DEVICE_BRIDGING_BRIDGE_VLANPORT, bri['.index']);
applyBridge(bri, ports, vlans, vlanPorts);
}
export function applyDeviceBridgingBridgeVLANPort(vlanPorts, bri) {
const ports = _dm_get(dm.DM_DEVICE_BRIDGING_BRIDGE_PORT, bri['.index']);
const vlans = _dm_get(dm.DM_DEVICE_BRIDGING_BRIDGE_VLAN, bri['.index']);
applyBridge(bri, ports, vlans, vlanPorts);
}
export function initDeviceBridgingBridge(bri) {
setUci('network', bri._key, {
type: 'bridge',
name: bri.Name,
enabled: '0',
});
// create an empty interface for the bridge
addUci('network', 'interface', `itf_${bri._key}`, {
device: bri.Name,
bridge_empty: '1',
});
}
export const filterDeviceBridgingBridge = uci => uci.type === 'bridge';
export function deinitDeviceBridgingBridge(uci) {
const ports = getUciOption('network', uci, 'ports');
ports?.forEach(port => {
if (port.includes('.')) {
const dev = getUciByType('network', 'device', { match: { name: port } });
if (dev) delUci('network', dev[0]['.name']);
}
});
}

View File

@@ -1,125 +0,0 @@
/*
* Copyright (c) 2025 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
import { getUciByType } from '../uci.js';
import { getBridgePortType, getTPIDFromDeviceType } from './common.js';
function importBridge(dev, devices, bridges) {
const briPorts = [];
const briVLAN = [];
const briVLANPort = [];
// create the management port first
briPorts.push({
Alias: `cpe-${dev.name}`,
Enable: 1,
Name: dev.name,
ManagementPort: 1,
PVID: 1,
TPID: 37120,
Type: 'CustomerVLANPort',
});
bridges.push({
Name: dev.name,
Alias: `cpe-${dev.name}`,
Enable: 1,
'Port.': briPorts,
'VLAN.': briVLAN,
'VLANPort.': briVLANPort,
_key: dev['.name'],
});
const ethPorts = devices.filter(x => x.ifname?.startsWith('eth'));
for (const portName of (dev.ports || [])) {
let portIndex = ethPorts.findIndex(x => x.ifname === portName);
if (portIndex >= 0) {
// Regular ethernet port
const ethDevice = ethPorts[portIndex];
const portType = getBridgePortType(ethDevice.type) || 'CustomerVLANPort';
const tpid = getTPIDFromDeviceType(ethDevice.type, ethDevice.tpid);
briPorts.push({
Enable: 1,
Name: ethDevice['.name'],
Alias: `cpe-${ethDevice['.name']}`,
TPID: tpid,
PVID: 1,
Type: portType,
LowerLayers: `Device.Ethernet.Interface.${portIndex + 1}`,
_key: ethDevice['.name'],
});
} else {
// vlan device
const device = devices.find(x => x.name === portName);
if (!device) {
_log_error('device not found', portName);
continue;
}
if (device.type === '8021q' || device.type === 'untagged' || device.type === '8021ad' || device.type === 'transparent') {
let vlanIndex = briVLAN.findIndex(x => x.VLANID === device.vid);
if (vlanIndex < 0) {
briVLAN.push({ Enable: 1, VLANID: device.vid });
vlanIndex = briVLAN.length;
} else {
vlanIndex += 1;
}
// Get the base ethernet device to determine the correct port index
const baseEthDevice = ethPorts.find(x => device.ifname === x.ifname);
const basePortIndex = baseEthDevice ? ethPorts.indexOf(baseEthDevice) : 0;
const portType = getBridgePortType(device.type) || 'CustomerVLANPort';
const tpid = getTPIDFromDeviceType(device.type, device.tpid);
briPorts.push({
Enable: 1,
Name: device['.name'],
Alias: `cpe-${device['.name']}`,
TPID: tpid,
PVID: device.vid,
Type: portType,
LowerLayers: `Device.Ethernet.Interface.${basePortIndex + 1}`,
_key: device['.name'],
});
briVLANPort.push({
Enable: 1,
VLAN: `Device.Bridging.Bridge.${bridges.length}.VLAN.${vlanIndex}`,
Port: `Device.Bridging.Bridge.${bridges.length}.Port.${briPorts.length}`,
Untagged: device.type === 'untagged' ? 1 : 0,
_key: device['.name'],
});
} else {
_log_error('unknown device type:', device.type);
}
}
}
if (briPorts.length > 1) {
const indexes = Array.from({ length: briPorts.length - 1 }, (v, i) => i + 2);
briPorts[0].LowerLayers = indexes.map(i => `Device.Bridging.Bridge.${bridges.length}.Port.${i}`).join(',');
}
}
export function importDeviceBridgingBridge() {
const bridges = [];
const devices = getUciByType('network', 'device');
devices?.forEach(dev => {
if (dev.type === 'bridge') {
importBridge(dev, devices, bridges);
}
});
return bridges;
}

View File

@@ -1,133 +0,0 @@
/*
* Copyright (c) 2025 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
import * as std from 'std';
import { isTrue } from '../utils.js';
import { getUciByType } from '../uci.js';
function setMgmtPortLowerLayers(bri) {
if (!bri) return 0;
const portPath = `Device.Bridging.Bridge.${bri['.index']}.Port.`;
const mgmtPort = _dm_instances(portPath, '(ManagementPort="true" OR ManagementPort=1)');
if (mgmtPort.length !== 1) return 0;
const nonMgmtPort = _dm_instances(portPath, '(ManagementPort="false" OR ManagementPort=0)');
_dm_update(`${mgmtPort[0]}.LowerLayers`, nonMgmtPort.join(','));
return 0;
}
export function changedDeviceBridgingBridgePort(bri) {
return setMgmtPortLowerLayers(bri);
}
export function changedDeviceBridgingBridgePortManagementPort(bri) {
return setMgmtPortLowerLayers(bri);
}
export function getDeviceBridgingBridgeStatus(bri) {
const enable = _dm_get(`Device.Bridging.Bridge.${bri['.index']}.Enable`);
return enable ? 'Enabled' : 'Disabled';
}
export function getDeviceBridgingBridgeSTPStatus(bri) {
const stpState = std.loadFile(`/sys/class/net/${bri.Name}/bridge/stp_state`)?.trim();
return stpState === '1' ? 'Enabled' : 'Disabled';
}
export function getDeviceBridgingBridgePortStatus(bri, port) {
if (!port['.db']) return 'Up';
const enable = _dm_get(`Device.Bridging.Bridge.${bri['.index']}.Port.${port['.index']}.Enable`);
return enable ? 'Up' : 'Down';
}
export function infoDeviceBridgingBridgePort(path, port) {
const mgmtPort = _dm_get(`${path}.ManagementPort`);
if (typeof mgmtPort === 'undefined' || mgmtPort) return;
const lower = _dm_get(`${path}.LowerLayers`);
if (lower) {
port.ifname = _dm_linker_value(lower);
}
}
// Helper function to read network statistics
function getNetworkStat(port, statName) {
return std.loadFile(`/sys/class/net/${port.ifname}/statistics/${statName}`)?.trim();
}
export const getDeviceBridgingBridgePortStatsBytesSent = (bri, port) =>
getNetworkStat(port, 'tx_bytes');
export const getDeviceBridgingBridgePortStatsBytesReceived = (bri, port) =>
getNetworkStat(port, 'rx_bytes');
export const getDeviceBridgingBridgePortStatsPacketsSent = (bri, port) =>
getNetworkStat(port, 'tx_packets');
export const getDeviceBridgingBridgePortStatsPacketsReceived = (bri, port) =>
getNetworkStat(port, 'rx_packets');
export const getDeviceBridgingBridgePortStatsErrorsSent = (bri, port) =>
getNetworkStat(port, 'tx_errors');
export const getDeviceBridgingBridgePortStatsErrorsReceived = (bri, port) =>
getNetworkStat(port, 'rx_errors');
export const getDeviceBridgingBridgePortStatsDiscardPacketsSent = (bri, port) =>
getNetworkStat(port, 'tx_dropped');
export const getDeviceBridgingBridgePortStatsDiscardPacketsReceived = (bri, port) =>
getNetworkStat(port, 'rx_dropped');
export const getDeviceBridgingBridgePortStatsMulticastPacketsReceived = (bri, port) =>
getNetworkStat(port, 'multicast');
export const getDeviceBridgingBridgePortStatsUnicastPacketsSent = (bri, port) =>
getNetworkStat(port, 'tx_unicast_packets');
export const getDeviceBridgingBridgePortStatsUnicastPacketsReceived = (bri, port) =>
getNetworkStat(port, 'rx_unicast_packets');
export const getDeviceBridgingBridgePortStatsMulticastPacketsSent = (bri, port) =>
getNetworkStat(port, 'tx_multicast_packets');
export const getDeviceBridgingBridgePortStatsBroadcastPacketsSent = (bri, port) =>
getNetworkStat(port, 'tx_broadcast_packets');
export const getDeviceBridgingBridgePortStatsBroadcastPacketsReceived = (bri, port) =>
getNetworkStat(port, 'rx_broadcast_packets');
export const getDeviceBridgingBridgePortStatsUnknownProtoPacketsReceived = (bri, port) =>
getNetworkStat(port, 'rx_unknown_packets');
export function getDeviceBridgingBridgePort(bri) {
const networkName = bri.Name.startsWith('br-') ? bri.Name.slice(3) : bri.Name;
const wifiIfaces = getUciByType('wireless', 'wifi-iface', { match: { multi_ap: '2' } });
wifiIfaces?.forEach(x => {
const ssid = getUciByType('dmmap_wireless', 'ssid',
{ match: { device: x.device, ssid: x.ssid}, confdir: '/etc/bbfdm/dmmap'});
if (Array.isArray(ssid) && ssid.length > 0) {
x.ssidPath = _dm_linker_path("Device.WiFi.SSID.", "Name", ssid[0].name) ?? '';
}
});
return wifiIfaces?.filter(x => x.network === networkName);
}
export function setDeviceBridgingBridgePortManagementPort(val, bri, port) {
if (isTrue(val)) {
_db_set(`Device.Bridging.Bridge.${bri['.index']}.Port.${port['.index']}.Name`, bri.Name);
}
return 1;
}

View File

@@ -1,61 +0,0 @@
/*
* Copyright (c) 2025 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
export const bridgePortTypeMap = [
{ portType: 'CustomerNetworkPort', devType: '8021ad' },
{ portType: 'CustomerVLANPort', devType: '8021q' },
{ portType: 'CustomerVLANPort', devType: 'untagged' },
{ portType: 'CustomerVLANPort', devType: '' },
{ portType: 'CustomerVLANPort', devType: undefined },
{ portType: 'VLANUnawarePort', devType: 'transparent' }
];
export function getBridgePortType(devType) {
const mapping = bridgePortTypeMap.find(map => map.devType === devType);
return mapping ? mapping.portType : null;
}
export function getBridgeDeviceType(portType) {
const mapping = bridgePortTypeMap.find(map => map.portType === portType);
return mapping ? mapping.devType : null;
}
export function getDefaultTPID(deviceType) {
switch (deviceType) {
case '8021q':
return '33024';
case '8021ad':
return '34984';
default:
return '37120';
}
}
export function getTPIDFromDeviceType(deviceType, explicitTPID) {
// If explicit TPID is set, use it
if (explicitTPID && explicitTPID !== '') {
return parseInt(explicitTPID, 10);
}
// Default TPID based on device type
switch (deviceType) {
case '8021q':
return 33024;
case '8021ad':
return 34984;
case 'untagged':
case 'transparent':
case '':
case undefined:
default:
return 37120;
}
}
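A quick usage sketch of the mapping helpers above, assuming the module is imported as in the bridge handlers (expected return values shown as comments):

import { getBridgePortType, getBridgeDeviceType, getTPIDFromDeviceType } from './common.js';

getBridgePortType('8021q');                  // 'CustomerVLANPort'
getBridgeDeviceType('CustomerNetworkPort');  // '8021ad'
getTPIDFromDeviceType('8021ad');             // 34984 (0x88A8)
getTPIDFromDeviceType('', '33024');          // 33024 - an explicit TPID takes precedence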

View File

@@ -1,126 +0,0 @@
/* eslint-disable no-undef */
/*
* Wrapper around the native QuickJS C binding `_uci_call` which speaks to
* libuci directly (see qjs_uci_api.c). The exported helpers mimic the public
* API of the original uci.js module so that existing call-sites can switch to
* this implementation by simply importing uci2.js.
*/
export function uciBool(val) {
// by default enable is true if it is not defined
return (val === undefined || val === '1' || val === 'true' || val === 'enable' || val === 'yes');
}
function callUci(method, args) {
const [ret, res] = _uci_call(method, args);
if (ret !== 0) {
// Returning undefined on error keeps behaviour consistent with the
// original helpers which silently return undefined.
return [ret, undefined];
}
return [ret, res];
}
export function getUci(args) {
const [, res] = callUci('get', args);
if (res) {
if (res.values) {
if (!args.section) {
return Object.values(res.values);
}
return res.values;
}
if (res.value !== undefined) {
return res.value;
}
}
return undefined;
}
export function getUciOption(config, section, option, extraArgs) {
let args = { config, section, option };
if (extraArgs) {
args = { ...args, ...extraArgs };
}
return getUci(args);
}
export function getUciByType(config, type, extraArgs) {
let args = { config, type };
if (extraArgs) {
args = { ...args, ...extraArgs };
}
return getUci(args);
}
export function getUciSection(config, section, extraArgs) {
let args = { config, section };
if (extraArgs) {
args = { ...args, ...extraArgs };
}
return getUci(args);
}
export function setUci(cfg, section, values, type, match, extraArgs) {
let args = { config: cfg, section };
if (type) args.type = type;
if (values) args.values = values;
if (match) args.match = match;
if (extraArgs) args = { ...args, ...extraArgs };
const [ret] = callUci('set', args);
return ret;
}
export function addUci(cfg, type, name, values, extraArgs) {
let args = { config: cfg, type };
if (name) args.name = name;
if (values) args.values = values;
if (extraArgs) args = { ...args, ...extraArgs };
const [, res] = callUci('add', args);
return res || undefined;
}
export function delUci(cfg, section, type, option, options, match, extraArgs) {
let args = { config: cfg };
if (section) args.section = section;
if (type) args.type = type;
if (option) args.option = option;
if (options) args.options = options;
if (match) args.match = match;
if (extraArgs) args = { ...args, ...extraArgs };
const [, res] = callUci('delete', args);
return res || undefined;
}
export function delUciOption(config, section, option, match, extraArgs) {
let args = { config, section, option };
if (match) args.match = match;
if (extraArgs) args = { ...args, ...extraArgs };
const [, res] = callUci('delete', args);
return res || undefined;
}
export function uciChanges(cfg, extraArgs) {
let args = { config: cfg };
if (extraArgs) args = { ...args, ...extraArgs };
const [, res] = callUci('changes', args);
return res && res.changes ? res.changes : undefined;
}
export function commitUci(cfg, extraArgs) {
let args = { config: cfg };
if (extraArgs) args = { ...args, ...extraArgs };
const [ret] = callUci('commit', args);
return ret;
}
export function revertUci(cfg, extraArgs) {
let args = { config: cfg };
if (extraArgs) args = { ...args, ...extraArgs };
const [ret] = callUci('revert', args);
return ret;
}
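A minimal usage sketch of these wrappers (the config, section and option names below are examples only, not part of the module):

import { getUciOption, getUciByType, setUci, commitUci } from './uci2.js';

const proto = getUciOption('network', 'lan', 'proto');    // e.g. 'static'
const devices = getUciByType('network', 'device');        // array of device sections
setUci('network', 'lan', { ipaddr: '192.168.1.1' });      // returns 0 on success
commitUci('network');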

View File

@@ -1,268 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
/* eslint-disable no-undef */
/* eslint-disable no-bitwise */
import * as os from 'os';
import * as std from 'std';
export function macAddressToBase64(macAddress) {
// Split the MAC address into an array of bytes using the colon separator
const bytes = macAddress.split(':');
// Convert the bytes from hexadecimal to decimal
const decimalBytes = bytes.map((byte) => parseInt(byte, 16));
// Convert the decimal bytes into an array of 8-bit binary strings
const binaryBytes = decimalBytes.map((byte) => byte.toString(2).padStart(8, '0'));
// Concatenate the binary strings into a single string
const binaryString = binaryBytes.join('');
// Split the binary string into groups of 6 bits
const base64Chars = [];
for (let i = 0; i < binaryString.length; i += 6) {
base64Chars.push(binaryString.slice(i, i + 6));
}
// Convert each group of 6 bits to a decimal number
const decimalBase64 = base64Chars.map((char) => parseInt(char, 2));
// Create the base64 character set
const base64CharacterSet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
// Map the decimal numbers to their corresponding base64 characters
const base64String = decimalBase64.map((num) => base64CharacterSet.charAt(num)).join('');
return base64String;
}
export function tr181Path(path, uciPath, keyName, keyVal) {
if (!keyVal) {
return '';
}
const [uciConfig, uciType] = uciPath.split('.');
const args = { config: uciConfig, type: uciType };
const [, res] = _ubus_call('uci', 'get', args);
if (!res || !res.values) {
_log_error('tr181Path: invalid result');
return '';
}
let insts = Object.values(res.values);
if (uciConfig === 'network' && uciType === 'interface') {
insts = insts.filter((x) => x.device !== 'lo' && !x.device?.startsWith('@') && x.proto !== 'dhcpv6');
}
const index = insts.findIndex((x) => x[keyName] === keyVal);
if (index < 0) {
return '';
}
if (path.startsWith('Device.')) {
return `${path}${index + 1}`;
}
return `Device.${path}${index + 1}`;
}
export function tr181IPInterface(uci) {
return tr181Path('IP.Interface.', 'network.interface', '.name', uci);
}
export function prefixLengthToSubnetMask(prefixLength) {
if (!prefixLength) {
return '';
}
const mask = 0xFFFFFFFF << (32 - prefixLength);
const subnetMask = [
(mask >>> 24) & 0xFF,
(mask >>> 16) & 0xFF,
(mask >>> 8) & 0xFF,
mask & 0xFF,
].join('.');
return subnetMask;
}
export function fileExists(path) {
let exists = false;
if (path !== '') {
// eslint-disable-next-line no-unused-vars
const [obj, err] = os.stat(path);
exists = (err === 0);
}
return exists;
}
export function waitUntilFileExists(path, timeoutMs = 10000) {
const startTime = Date.now();
while (!fileExists(path) && (Date.now() - startTime < timeoutMs)) {
os.sleep(100);
}
return fileExists(path);
}
export function runCommand(command) {
const fp = std.popen(command, 'r');
if (fp) {
const result = fp.readAsString();
if (fp.close() === 0)
return result;
else
return undefined;
}
return undefined;
}
export function fileExistsWithRegex(directory, regex) {
const [files, err] = os.readdir(directory);
if (err) {
_log_warn(`fileExistsWithRegex(): Could not read directory: ${directory}`);
}
for (let i = 0; i < files.length; i += 1) {
if (regex.test(files[i])) {
return true;
}
}
return false;
}
export function isIPv4Address(addr) {
return addr?.includes('.');
}
export function isIPv6Address(addr) {
return addr?.includes(':');
}
// find the pathname in LowerLayers
export function findPathInLowerlayer(path, inst, instKey) {
const lowerlayer = _dm_get(`${path}.LowerLayers`);
if (lowerlayer === '') {
return false;
}
if (lowerlayer.includes(instKey)) {
if (lowerlayer.includes(inst)) {
return true;
}
} else {
const layers = lowerlayer.split(',');
if (layers.find((x) => findPathInLowerlayer(x, inst, instKey))) {
return true;
}
}
return false;
}
export function hex2a(hex) {
let i = 0;
let str = '';
for (i = 0; i < hex.length; i += 2) {
str += String.fromCharCode(parseInt(hex.substr(i, 2), 16));
}
return str;
}
export function capitalizeFirstLetter(string) {
return string.charAt(0).toUpperCase() + string.slice(1);
}
export function lowercaseFirstLetter(string) {
return string.charAt(0).toLowerCase() + string.slice(1);
}
export function getIfnameOperState(ifname) {
if (!ifname) {
return 'Down';
}
const res = std.loadFile(`/sys/class/net/${ifname}/operstate`);
if (res) {
return capitalizeFirstLetter(res.trim());
}
return 'Down';
}
export function getIfnameState(ifname, name) {
if (!ifname) {
return '';
}
const res = std.loadFile(`/sys/class/net/${ifname}/${name}`);
return res?.trim();
}
export function strToHex(str) {
if (!str) {
return '';
}
let hex = '';
for (let i = 0; i < str.length; i += 1) {
hex += str.charCodeAt(i).toString(16);
}
return hex;
}
// transform an object of the following shape:
// {
// 'SSIDtoVIDMapping.1.SSID': 'abc',
// 'SSIDtoVIDMapping.1.VID': 100,
// 'SSIDtoVIDMapping.2.SSID': 'xyz',
// 'SSIDtoVIDMapping.2.VID': 200,
// Enable: 'true'
// }
// into:
// {
// SSIDtoVIDMapping: [ { SSID: 'abc', VID: 100 }, { SSID: 'xyz', VID: 200 } ],
// Enable: 'true'
// }
export function transformInputObject(obj) {
const result = {};
Object.entries(obj).forEach(([key, value]) => {
const splitKey = key.split('.');
if (splitKey.length < 3) {
result[key] = value; // add invalid keys directly to the result
return;
}
const mainKey = splitKey[0];
const index = parseInt(splitKey[1], 10) - 1;
const prop = splitKey[2];
if (!result[mainKey]) {
result[mainKey] = [];
}
if (!result[mainKey][index]) {
result[mainKey][index] = {};
}
result[mainKey][index][prop] = value;
});
return result;
}
export function isTrue(val) {
return val === 'true' || val === '1' || val === true;
}
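A short usage sketch of some of the helpers above (expected results shown as comments; the import path is illustrative):

import { prefixLengthToSubnetMask, isTrue, transformInputObject } from './utils.js';

prefixLengthToSubnetMask(24);   // '255.255.255.0'
isTrue('1');                    // true
transformInputObject({ 'Map.1.Key': 'a', 'Map.1.Val': 1, Enable: 'true' });
// => { Map: [ { Key: 'a', Val: 1 } ], Enable: 'true' }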

File diff suppressed because it is too large

View File

@@ -1,290 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#ifndef DM_NODE_H
#define DM_NODE_H
#include "dm_types.h"
enum NODE_FLAG {
FLAG_NONE = 0x0,
FLAG_COUNTER = 0x40,
FLAG_HAS_MIN = 0x80,
FLAG_HAS_MAX = 0x100,
FLAG_HAS_ORDER = 0x400,
FLAG_WRITABLE = 0x800,
FLAG_CONFIDENTIAL = 0x1000,
FLAG_CWMP_ONLY = 0x4000,
FLAG_USP_ONLY = 0x8000,
FLAG_INTERNAL= 0x10000,
};
enum DM_UCI_MAP_TYPE {
DM_UCI_MAP_TYPE_NONE = 0,
DM_UCI_MAP_TYPE_SIMPLE = 0x01, // simple value 2 value
DM_UCI_MAP_TYPE_DISABLE = 0x02, // uci disable bool type
DM_UCI_MAP_TYPE_TABLE = 0x04, // a JSON object is used for mapping, {uci: dm}
DM_UCI_MAP_TYPE_INTERFACE = 0x08, // ip interface
DM_UCI_MAP_TYPE_JS = 0x10, // js code
};
struct dm_uci_map {
unsigned int type;
const char *map;
const char *key;
};
struct dm_node_info {
enum DM_NODE_TYPE type;
dm_node_id_t node_id;
const char *const name;
const struct dm_node_info *const parent;
const char *const table_name; // if NULL the value is not stored in the database
const char *pathname;
enum NODE_FLAG flag;
dm_node_id_t depends_node_id;
};
struct dm_parameter {
struct dm_node_info node;
enum DM_DATA_TYPE data_type;
long min;
long max;
int list;
union {
// for enum data type
const char **enum_strings;
// for counter data type
dm_node_id_t counter_object;
// for path data type
const dm_node_id_t *paths;
} data;
const char *set_on_create;
const char *js_val;
const char *const_val;
const char *default_val;
const char *default_uci_val;
struct dm_uci_map map;
};
struct command_arg {
const char *name;
enum DM_DATA_TYPE type;
int min;
int max;
int list;
const char **enum_values;
int mandatory;
};
struct dm_command {
struct dm_node_info node;
int async;
const struct command_arg *inputs;
int inputs_cnt;
const struct command_arg *outputs;
int outputs_cnt;
};
struct event_arg {
const char *name;
enum DM_DATA_TYPE type;
};
struct dm_event {
struct dm_node_info node;
const struct event_arg *args;
int args_cnt;
const char *ubus_event;
};
struct dm_object {
struct dm_node_info node;
int param_num;
const struct dm_node_info *const *const param_list;
int command_num;
const struct dm_node_info *const *const command_list;
int object_num;
const struct dm_node_info *const *const event_list;
int event_num;
const struct dm_node_info *const *const object_list;
int paths_refs_num;
const struct dm_node_info *const *const paths_refs_list;
const char *key_param_names;
struct dm_uci_map map;
const char *js_val;
int fixed_objects;
};
const struct dm_node_info *dm_node_get_root(void);
const struct dm_node_info *dm_node_get_info(dm_node_id_t id);
/**
* This function will look up a parameter by ID
* @pre None
* @post valid dm_parameter pointer returned or NULL if error
* @param id The actual id for which we want to retrieve a
* pointer to the dm_parameter struct
* @return NULL in case id is out of range or if id does not point
* to a parameter node, a pointer to the dm_parameter struct otherwise
*/
const struct dm_parameter *dm_node_get_parameter(dm_node_id_t id);
const struct dm_command *dm_node_get_command(dm_node_id_t id);
const struct dm_event *dm_node_get_event(dm_node_id_t id);
/**
* This function will look up a command by ID
* @pre None
* @post valid dm_command pointer returned or NULL if error
* @param id The actual id for which we want to retrieve a
* pointer to the dm_parameter struct
* @return NULL in case id is out of range or if id does not point
* to a command node, a pointer to the dm_command struct otherwise
*/
const struct dm_command *dm_node_get_command(dm_node_id_t id);
/**
* This function will look up an object by ID
* @pre None
* @post valid dm_object pointer returned or NULL if error
* @param id The actual id for which we want to retrieve a
* pointer to the dm_object struct
* @return NULL in case id is out of range or if id does not point
* to an object node, a pointer to the dm_object struct otherwise
* Note that if the id points to an object list, a pointer to the
* first object is returned
*/
const struct dm_object *dm_node_get_object(dm_node_id_t id);
int dm_node_is_valid(dm_node_id_t id);
int dm_node_is_parameter(dm_node_id_t id);
int dm_node_is_command(dm_node_id_t id);
int dm_node_is_event(dm_node_id_t id);
int dm_node_is_writable(dm_node_id_t id);
int dm_node_is_object(dm_node_id_t id);
int dm_node_is_objectlist(dm_node_id_t id);
int dm_node_is_counter(dm_node_id_t id);
int dm_node_is_text_type(dm_node_id_t id);
int dm_node_is_bool_type(dm_node_id_t id);
int dm_node_is_ul_type(dm_node_id_t id);
int dm_node_is_unsigned_type(dm_node_id_t id);
int dm_node_is_confidential(dm_node_id_t id);
int dm_node_is_cwmp_only(dm_node_id_t id);
int dm_node_is_usp_only(dm_node_id_t id);
int dm_node_is_internal(dm_node_id_t id);
dm_node_id_t dm_node_counter_id(dm_node_id_t id);
int dm_node_has_db(dm_node_id_t id);
const char *dm_node_object_keys(dm_node_id_t id);
int dm_node_is_fixed_objects(dm_node_id_t id);
int dm_node_max_data_size(dm_node_id_t id);
int dm_node_param_mem_size(dm_node_id_t node_id);
const char *get_param_xsd_type(enum DM_DATA_TYPE type);
const char *dm_node_name(dm_node_id_t id);
dm_node_id_t dm_node_id_parent(dm_node_id_t id);
int dm_node_parent(const dm_node_t *node, dm_node_t *parent);
// get first multi-instance parent
int dm_node_i_parent(const dm_node_t *node, dm_node_t *parent);
dm_node_id_t dm_node_i_parent_id(const dm_node_id_t id);
int dm_node_index_cnt(dm_node_id_t id);
enum DM_DATA_TYPE dm_node_data_type(dm_node_id_t id);
dm_node_id_t dm_node_get_apply_depends(dm_node_id_t id);
dm_node_id_t dm_node_get_extends(dm_node_id_t id);
/*
Get the full name of a node.
@param node[in] Node ID whose name we want to retrieve
@param name[out] Location where the node name is written
@param name_len[in] Amount of space available in name
@return 0 in case of success, -1 in case of failure
*/
int dm_node2name(const dm_node_t *node, char *name, int name_len);
int dm_node2name_with_index(const dm_node_t *node, char *name, int name_len, const char *index_replacement);
int dm_name2node(const struct dm_node_info *parent, const char *name, dm_node_t *node);
int dm_path2node(const char *path, dm_node_t *node);
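/* Illustrative sketch (not part of the original header; the path is an example TR-181 parameter):
*   dm_node_t node;
*   char name[256];
*   if (dm_path2node("Device.IP.Interface.1.Enable", &node) == 0)
*       dm_node2name(&node, name, sizeof(name)); // yields the same full path name again
*/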
int dm_node_verify_param_data(dm_node_id_t id, const char *data);
// check whether node 'ancestor' is an ancestor (or the parent) of node 'id'
int dm_node_is_ancestor(dm_node_id_t id, dm_node_id_t ancestor);
// check whether the parameter data type is a list (comma separated)
int dm_node_is_param_list(dm_node_id_t id);
dm_index_t dm_node_last_index(const dm_node_t *node);
int dm_node_is_index_complete(const dm_node_t *node);
/** Compare if two nodes are identical.
Two nodes are considered identical if their id, their index arrays and their cnt are the same
@param node1[in] first node for the comparison
@param node2[in] second node for the comparison
@return 1 if identical, 0 if not identical
*/
int dm_node_equal(const dm_node_t *node1, const dm_node_t *node2);
int dm_node_has_path(dm_node_id_t node_id, dm_node_id_t path);
// return 0-(max-1) for valid string, -1 for unknown enum string
int dm_node_get_enum_index(dm_node_id_t id, const char *enum_str);
// return the enum string for valid index: 0-(max-1), otherwise return NULL
const char *dm_node_get_enum_str(dm_node_id_t id, int index);
/** Remove a path name from the comma-separated path names
@param paths path names separated with comma
@param node node of path name that is to be removed
@return 0 if successful, -1 for failure
*/
int tr181_paths_remove(dm_path_t paths, const dm_node_t *node);
/** Append one path name to the comma-separated path names
@param paths path names separated with comma
@param node node of path name that is to be appended
@return 0 if successful, -1 for failure
*/
int tr181_paths_add(dm_path_t paths, const dm_node_t *node);
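/* Illustrative example (assuming `node` refers to Device.IP.Interface.2):
*   dm_path_t paths = "Device.IP.Interface.1,Device.IP.Interface.2";
*   tr181_paths_remove(paths, &node);  // paths becomes "Device.IP.Interface.1"
*/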
// Find the "Order" parameter node from all its child nodes
int dm_node_find_order_param(const dm_node_t *obj_node, dm_node_t *order_node);
/** Return the database table for a node
@param node The node for which the database table is returned
@return pointer to the name of the database table (NULL if the node is not in the database)
*/
const char *dm_node_get_table_name(const struct dm_node_info *node);
// Get child node id by its name
dm_node_id_t dm_node_get_child_id(dm_node_id_t id, const char *name);
// get child node by name
int dm_node_get_child(const dm_node_t *node, const char *name, dm_node_t *child);
const char *dm_node_str(const dm_node_t *node);
const char *dm_node_id_str(const dm_node_id_t id);
// compare whether the nodes are compatible, that is, node1 and node2 have the same id and
// all indexes of node1 are a subset of those of node2 or vice versa
int dm_node_compatible(const dm_node_t *node1, const dm_node_t *node2);
// get the string xsd type of the data type
const char *dm_node_get_param_xsd_type(dm_node_id_t id);
/** Get the output argument of a command node
@param [in] id node id
@param [in] arg_name argument name of the command
@return pointer to the matching command_arg if successful, NULL for failure
*/
const struct command_arg *dm_node_get_command_output_arg(dm_node_id_t id, const char *arg_name);
// return 1 if verified, otherwise 0.
int dm_node_verify_command_input(dm_node_id_t id, const char *input_name, const char *input_value);
// Compare the command argument pathname with index (index part will be skipped for comparison)
// ex, "result.{i}.abc" == "result.100.abc"
// return 0 if equal, otherwise 1
int dm_node_compare_command_arg_name(const char* str1, const char* str2);
#endif

View File

@@ -1,97 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#ifndef DM_TYPES_H
#define DM_TYPES_H
#define MAX_DM_NODE_DEPTH 8
#define INVALID_DM_INDEX ((dm_index_t)0)
#define INVALID_DM_NODE_ID ((dm_node_id_t)-1)
typedef unsigned int dm_node_id_t;
typedef unsigned int dm_index_t;
typedef dm_index_t dm_index_path_t[MAX_DM_NODE_DEPTH];
typedef struct
{
dm_index_path_t index;
} node_index_path_t;
typedef struct
{
dm_node_id_t id;
dm_index_path_t index;
int cnt;
} dm_node_t;
#define dm_init_node(id) \
{ \
id, {0}, 0 \
}
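// Illustrative example (SOME_NODE_ID stands for a generated node id constant, not defined here):
//   dm_node_t node = dm_init_node(SOME_NODE_ID);
//   node.index and node.cnt are filled in later, once the instance indexes are resolved.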
typedef unsigned int dm_uint_t;
typedef int dm_int_t;
typedef int dm_bool_t;
typedef char dm_enum_t[128];
typedef char dm_ip_t[64];
// IPv4 or IPv6 routing prefix in Classless Inter-Domain Routing (CIDR) notation [RFC4632].
// This is specified as an IP address followed by an appended "/n" suffix,
// where n (the prefix size) is an integer in the range 0-32 (for IPv4) or 0-128 (for IPv6)
// that indicates the number of (leftmost) '1' bits of the routing prefix.
// If the IP address part is unspecified or inapplicable, it MUST be an empty string unless
// otherwise specified by the parameter definition. In this case the IP prefix will be of the form "/n".
// IPv4 example: 192.168.1.0/24
// IPv6 example: 2001:edff:fe6a:f76::/64
typedef char dm_ip_prefix_t[64];
typedef char dm_mac_t[20]; // 18 would suffice but there can be word access when getting a value; hence rounded up to 20
typedef char dm_date_time_t[64];
typedef char dm_url_t[260];
typedef unsigned long dm_ulong_t;
typedef unsigned long long dm_ulonglong_t;
typedef char dm_path_t[1024];
typedef char dm_domain_t[256];
#define dm_true 1
#define dm_false 0
enum DM_DATA_TYPE {
DM_DATA_INT = 0,
DM_DATA_LONG,
DM_DATA_UINT,
DM_DATA_ULONG,
DM_DATA_BOOLEAN,
DM_DATA_STRING,
DM_DATA_HEXBINARY,
DM_DATA_BASE64,
DM_DATA_IP,
DM_DATA_IPV4,
DM_DATA_IPV6,
DM_DATA_IP_PREFIX,
DM_DATA_IPV6_PREFIX,
DM_DATA_MAC,
DM_DATA_DATETIME,
DM_DATA_ENUM,
DM_DATA_URL,
DM_PATH_NAME,
DM_DATA_UNKNOWN
};
enum DM_NODE_TYPE {
DM_NODE_PARAMETER = 0,
DM_NODE_OBJECT,
DM_NODE_OBJECT_LIST,
DM_NODE_COMMAND,
DM_NODE_EVENT
};
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -1,508 +0,0 @@
/* eslint-disable no-await-in-loop */
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
const assert = require('assert');
const fs = require('fs');
const xml2js = require('xml2js');
const util = require('util');
const readFile = util.promisify(fs.readFile);
const writeFile = util.promisify(fs.writeFile);
const parser = new xml2js.Parser();
const parseXML = util.promisify(parser.parseString);
const cwmpTr181XmlFile = 'tr-181-2-19-1-cwmp-full.xml';
const uspTr181XmlFile = 'tr-181-2-19-1-usp-full.xml';
const uspTr181VendorExtXmlFile = 'tr-181-vendor-extensions-usp.xml';
const Tr104USPXmlFile = 'tr-104-2-0-2-usp-full.xml';
const Tr104CWMPXmlFile = 'tr-104-2-0-2-cwmp-full.xml';
let cwmpModel;
let uspModel;
let uspVendorExtModel;
let tr181 = true;
async function saveFile(file, obj) {
await writeFile(file, JSON.stringify(obj, null, 4));
console.log('saved file:', file);
}
function getRange(attr) {
if (typeof attr[0] !== 'object') {
return '';
}
if (attr[0].range) {
const range = attr[0].range[0].$;
return `(${range.minInclusive ?? ''}:${range.maxInclusive ?? ''})`;
}
return '';
}
function objParent(obj) {
let parent;
if (obj.endsWith('}.')) {
parent = obj.slice(0, -5);
} else {
parent = obj.slice(0, -1);
}
return parent.substring(0, parent.lastIndexOf('.'));
}
function parsePathRef(obj, ref) {
let path = obj.$.name;
ref = ref.trim().replace(/\n$/, '');
if (ref.startsWith('.')) {
if (path.startsWith('Device.Services.VoiceService.') && !path.endsWith('()')) {
return `Device.Services.VoiceService.{i}${ref}`;
}
return `Device${ref}`;
}
if (ref.startsWith('#')) {
while (ref.startsWith('#')) {
path = objParent(path);
ref = ref.slice(1);
}
return path + ref;
}
if (ref.startsWith('Device.')) {
return ref;
}
return path + ref;
}
function getParamType(obj, res, syntaxType) {
const intTypes = [
'int',
'long',
'unsignedInt',
'unsignedLong',
];
const intType = intTypes.find((x) => Object.keys(syntaxType).includes(x));
if (intType) {
res.dataType = `${intType}${getRange(syntaxType[intType])}`;
} else if (syntaxType.boolean) {
res.dataType = 'boolean';
} else if (syntaxType.dateTime) {
res.dataType = 'dateTime';
} else if (syntaxType.string) {
const attr = syntaxType.string[0];
if (typeof attr === 'object') {
if (attr.enumeration) {
res.dataType = 'enum';
res.enum = attr.enumeration.map((x) => x.$.value);
} else if (attr.pathRef) {
res.dataType = 'pathRef';
if (attr.pathRef[0].$.targetParent) {
res.pathRef = attr.pathRef[0].$.targetParent.split(' ').filter((x) => x).map((x) => parsePathRef(obj, x));
}
} else if (attr.enumerationRef) {
res.dataType = 'enum';
res.enumerationRef = parsePathRef(obj, attr.enumerationRef[0].$.targetParam);
} else if (attr.size) {
res.dataType = `string(${attr.size[0].$?.minLength ?? ''}:${attr.size[0].$?.maxLength ?? ''})`;
} else if (attr.pattern) {
// handle it as enum
res.dataType = 'enum';
res.enum = attr.pattern.map((x) => x.$.value);
} else {
assert(false, `unknown string type: ${JSON.stringify(syntaxType, null, 2)}`);
}
} else {
res.dataType = 'string';
}
} else if (syntaxType.dataType) {
res.dataType = syntaxType.dataType[0].$.ref;
if (res.dataType === 'Alias') {
res.dataType = 'string(:64)';
} else if (res.dataType === 'DiagnosticsState') {
res.dataType = 'enum';
res.enum = ['None', 'Requested', 'Canceled', 'Complete', 'Error'];
} else if (res.dataType === 'StatsCounter64') {
res.dataType = 'unsignedLong';
}
} else if (syntaxType.hexBinary) {
res.dataType = 'hexBinary';
const { size } = syntaxType.hexBinary[0];
if (size) {
res.dataType += `(${size[0].$?.minLength ?? ''}:${size[0].$?.maxLength ?? ''})`;
}
} else if (syntaxType.base64) {
res.dataType = 'base64';
const { size } = syntaxType.base64[0];
if (size) {
res.dataType += `(${size[0].$?.minLength ?? ''}:${size[0].$?.maxLength ?? ''})`;
}
} else if (syntaxType.decimal) {
res.dataType = 'decimal';
const { size } = syntaxType.decimal[0];
if (size) {
res.dataType += `(${size[0].$?.minLength ?? ''}:${size[0].$?.maxLength ?? ''})`;
}
} else {
console.log(`unknown datatype:\n ${JSON.stringify(syntaxType, null, 4)}`);
res.dataType = 'unknown';
}
if (syntaxType.list) {
res.dataType += '[]';
}
}
function getParamObj(obj, param, proto) {
const res = {
name: param.$.name,
};
if (proto) {
res.proto = proto;
}
res.access = param.$.access;
const syntaxType = param.syntax[0];
getParamType(obj, res, syntaxType);
if (syntaxType.$?.hidden) {
res.hidden = true;
}
if (syntaxType.default && syntaxType.default[0].$?.value) {
const def = syntaxType.default[0].$?.value;
if (def !== 'false' && def !== '') {
res.default = syntaxType.default[0].$?.value;
}
}
return res;
}
let allObjects = [];
function generateCWMPObjects(objs) {
objs.forEach((obj) => {
const o = allObjects.find((x) => x.object === obj.$.name);
if (o) {
const params = obj.parameter.map((param) => getParamObj(obj, param, 'cwmp'));
o.parameters = o.parameters.concat(params);
} else {
allObjects.push({
object: obj.$.name,
proto: 'cwmp',
fixedObject: obj.$['dmr:fixedObject'],
uniqueKeys: obj.uniqueKey?.map((x) => x.parameter[0].$.ref).join(','),
// numEntriesParameter: obj.$.numEntriesParameter,
access: obj.$.access,
parameters: obj.parameter?.map((param) => (
getParamObj(obj, param, 'cwmp'))) ?? [],
});
}
});
}
function getCommandInput(cmdInfo) {
if (!cmdInfo.input) {
return [];
}
const objParams = [];
if (cmdInfo.input[0].object) {
cmdInfo.input[0].object.forEach((obj) => {
obj.parameter.forEach((p) => {
const inputParams = {
parameter: obj.$.name + p.$.name,
mandatory: p.$.mandatory === 'true',
};
getParamType(cmdInfo, inputParams, p.syntax[0]);
objParams.push(inputParams);
});
});
}
const params = cmdInfo.input[0].parameter?.map((p) => {
const inputParams = {
parameter: p.$.name,
mandatory: p.$.mandatory === 'true',
};
getParamType(cmdInfo, inputParams, p.syntax[0]);
return inputParams;
});
return objParams.concat(params ?? []);
}
function getCommandOutput(cmdInfo) {
if (!cmdInfo.output) {
return [];
}
const outParams = cmdInfo.output[0].parameter?.map((p) => {
const outputs = {
parameter: p.$.name,
};
getParamType(cmdInfo, outputs, p.syntax[0]);
return outputs;
}) ?? [];
const outObjs = cmdInfo.output[0].object?.map((obj) => {
const outputs = {
object: obj.$.name,
};
outputs.parameters = obj.parameter?.map((p) => {
const outs = {
parameter: p.$.name,
};
getParamType(cmdInfo, outs, p.syntax[0]);
return outs;
});
return outputs;
}) ?? [];
return outParams.concat(outObjs);
}
function generateUSPObjects(objs) {
objs.forEach((obj) => {
const o = allObjects.find((x) => x.object === obj.$.name);
if (o) {
delete o.proto;
obj.parameter?.forEach((p) => {
const param = o.parameters.find((x) => x.name === p.$.name);
if (param) {
delete param.proto;
} else {
o.parameters.push(getParamObj(obj, p, 'usp'));
}
});
if (obj.command) {
const cmds = obj.command.map((cmd) => ({
name: cmd.$.name,
async: !!cmd.$.async,
input: getCommandInput(cmd),
output: getCommandOutput(cmd),
}));
if (o.commands) {
o.commands = o.commands.concat(cmds);
} else {
o.commands = cmds;
}
}
if (obj.event) {
const events = obj.event.map((ev) => ({
name: ev.$.name,
parameter: ev.parameter?.map((p) => p.$.name),
}));
if (o.events) {
o.events = o.events.concat(events);
} else {
o.events = events;
}
}
} else {
const cwmpObj = cwmpModel.object.find((x) => x.$.name === obj.$.name);
const newObj = {
object: obj.$.name,
proto: cwmpObj ? undefined : 'usp',
uniqueKeys: obj.uniqueKey?.map((x) => x.parameter[0].$.ref).join(','),
// numEntriesParameter: obj.$.numEntriesParameter,
access: obj.$.access,
fixedObject: obj.$['dmr:fixedObject'],
parameters: obj.parameter?.map((param) => (
getParamObj(obj, param, (cwmpObj && cwmpObj.parameter?.find((x) => x.$.name === param.$.name)) ? undefined : 'usp'))) ?? [],
};
if (obj.command) {
newObj.commands = obj.command.map((cmd) => ({
name: cmd.$.name,
async: !!cmd.$.async,
input: getCommandInput(cmd),
output: getCommandOutput(cmd),
}));
}
if (obj.event) {
newObj.events = obj.event.map((ev) => ({
name: ev.$.name,
parameter: ev.parameter?.map((p) => p.$.name),
}));
}
allObjects.push(newObj);
}
});
}
function mergeProfileObjs(obj1, obj2) {
if (!obj1) {
return obj2;
}
if (!obj2) {
return obj1;
}
obj2.forEach((obj) => {
const o = obj1.find((x) => x.object === obj.object);
if (o) {
if (o.parameter) {
o.parameter = o.parameter.concat(obj.parameter ?? []);
} else {
o.parameter = obj.parameter;
}
if (o.command) {
o.command = o.command.concat(obj.command ?? []);
} else {
o.command = obj.command;
}
if (o.event) {
o.event = o.event.concat(obj.event ?? []);
} else {
o.event = obj.event;
}
} else {
obj1.push(obj);
}
});
return obj1;
}
function parseProfileObjects(model, profileName) {
const profile = model.profile.find((x) => x.$.name === profileName);
if (!profile) {
return [];
}
// assert(profile, `profile not found ${profileName}`);
const objs = profile.object?.map((o) => ({
object: o.$.ref,
parameter: o.parameter?.map((p) => p.$.ref),
command: o.command?.map((c) => c.$.ref),
event: o.event?.map((e) => e.$.ref),
}));
const exts = profile.$.extends ?? profile.$.base;
if (exts) {
let res = objs;
exts.split(' ').forEach((ext) => {
const extObjs = parseProfileObjects(model, ext);
res = mergeProfileObjs(res, extObjs);
});
return res;
}
return objs;
}
async function getProfileObjects(model, profileName) {
const profileObjs = parseProfileObjects(model, profileName);
const objs = [];
profileObjs.forEach((obj) => {
const targetObj = model.object.find((o) => o.$.name === obj.object);
assert(targetObj, `object not found ${obj.object}`);
const keys = targetObj.uniqueKey?.map((x) => x.parameter[0].$.ref);
targetObj.parameter = targetObj.parameter?.filter((x) => obj.parameter?.includes(x.$.name) || (keys?.includes(x.$.name)));
targetObj.command = targetObj.command?.filter((x) => obj.command?.includes(x.$.name));
objs.push(targetObj);
});
return objs;
}
async function loadXMLModel(file) {
const xmlData = await readFile(file, 'utf8');
const jsonData = await parseXML(xmlData);
const [model] = jsonData['dm:document'].model;
return model;
}
function printUsage() {
console.log('Usage:\nnode makeDM.js <tr181|tr104> [profile]');
}
(async () => {
try {
if (process.argv.length < 3 || (process.argv[2] !== 'tr181' && process.argv[2] !== 'tr104')) {
printUsage();
process.exit(-1);
}
if (process.argv[2] === 'tr104') {
tr181 = false;
}
if (tr181 && fs.existsSync(uspTr181VendorExtXmlFile)) {
uspVendorExtModel = await loadXMLModel(uspTr181VendorExtXmlFile);
} else if (tr181) {
console.warn(`Optional vendor extension file '${uspTr181VendorExtXmlFile}' not found, skipping.`);
}
if (tr181) {
cwmpModel = await loadXMLModel(cwmpTr181XmlFile);
uspModel = await loadXMLModel(uspTr181XmlFile);
} else {
cwmpModel = await loadXMLModel(Tr104CWMPXmlFile);
uspModel = await loadXMLModel(Tr104USPXmlFile);
cwmpModel.object.forEach((obj) => {
obj.$.name = `Device.Services.${obj.$.name}`;
});
cwmpModel.profile.forEach((prof) => {
prof.object?.forEach((obj) => {
obj.$.ref = `Device.Services.${obj.$.ref}`;
});
});
uspModel.object.forEach((obj) => {
obj.$.name = `Device.Services.${obj.$.name}`;
});
uspModel.profile.forEach((prof) => {
prof.object?.forEach((obj) => {
obj.$.ref = `Device.Services.${obj.$.ref}`;
});
});
}
if (process.argv.length === 3) {
generateCWMPObjects(cwmpModel.object);
generateUSPObjects(uspModel.object);
if (tr181 && uspVendorExtModel) {
generateUSPObjects(uspVendorExtModel.object);
}
const fileName = `${tr181 ? 'tr181' : 'tr104'}-full-objects.json`;
await saveFile(fileName, allObjects);
process.exit(0);
}
for (const arg of process.argv.slice(3)) {
// profile
const profile = arg;
const cwmpObjects = await getProfileObjects(cwmpModel, profile);
const uspObjects = await getProfileObjects(uspModel, profile);
generateCWMPObjects(cwmpObjects);
generateUSPObjects(uspObjects);
await saveFile(`${profile}.json`, allObjects);
allObjects = [];
}
} catch (error) {
console.error(`Error while reading file: ${error}`);
console.log(error.stack);
}
})();

View File

@@ -1,88 +0,0 @@
// This script is used to load and validate the js handlers code in dm-file.
(function () {
const fs = require('fs');
const path = require('path');
const { spawnSync } = require('child_process');
// Root directory (dm-files) relative to this script
const dmFilesRoot = path.resolve(__dirname, '../dm-files');
/**
* Recursively walk a directory and collect all *.js files that do not start with a dot.
* @param {string} dir - directory to walk
* @param {string[]} out - accumulator for file paths
*/
function collectJsFiles(dir, out) {
const entries = fs.readdirSync(dir, { withFileTypes: true });
for (const entry of entries) {
// Skip hidden files/directories (starting with ".")
if (entry.name.startsWith('.')) {
continue;
}
const fullPath = path.join(dir, entry.name);
if (entry.isDirectory()) {
collectJsFiles(fullPath, out);
} else if (entry.isFile() && fullPath.endsWith('.js')) {
out.push(fullPath);
}
}
}
/**
* Validate a single JavaScript file with QuickJS (qjs).
* Exits the process with -1 on failure.
* @param {string} filePath - absolute path of the JS file
*/
function validateWithQjs(filePath) {
// Extract directory and filename for proper working directory
const fileDir = path.dirname(filePath);
const fileName = path.basename(filePath);
// Capture stdout/stderr so we can print them on failure
// Set the working directory to the file's directory
const result = spawnSync('qjs', [fileName], {
encoding: 'utf8',
cwd: fileDir
});
if (result.status === 0) {
return; // Validated successfully
}
// Show QuickJS output so user sees error details
console.error(`\n===== QuickJS validation failed: ${filePath} =====`);
if (result.stdout) {
console.error(result.stdout.trim());
}
if (result.stderr) {
console.error(result.stderr.trim());
}
console.error('===============================================');
process.exit(-1);
}
function main() {
if (!fs.existsSync(dmFilesRoot)) {
console.error(`dm-files directory not found at: ${dmFilesRoot}`);
process.exit(-1);
}
const jsFiles = [];
collectJsFiles(dmFilesRoot, jsFiles);
if (jsFiles.length === 0) {
console.log('No JavaScript files found to validate.');
return;
}
console.log(`Validating ${jsFiles.length} JavaScript file(s) with QuickJS...`);
jsFiles.forEach(validateWithQjs);
console.log('All files validated successfully.');
}
// Execute when run directly (not required when imported)
if (require.main === module) {
main();
}
})();

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@@ -1,64 +0,0 @@
#
# Copyright (c) 2023 Genexis B.V. All rights reserved.
# This Software and its content are protected by the Dutch Copyright Act
# ('Auteurswet'). All and any copying and distribution of the software
# and its content without authorization by Genexis B.V. is
# prohibited. The prohibition includes every form of reproduction and
# distribution.
#
#
include $(TOPDIR)/rules.mk
PKG_NAME:=bridgemngr
PKG_VERSION:=1.0.0
PKG_RELEASE:=1
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
PLATFORM_CONFIG:=$(TOPDIR)/.config
include $(INCLUDE_DIR)/package.mk
include ../../bbfdm/bbfdm.mk
define Package/$(PKG_NAME)
DEPENDS:=+dm-api +datamodels +libubox +libubus +ubus
CATEGORY:=Genexis
TITLE:=GeneOS agent
URL:=http://www.genexis.eu
PKG_LICENSE:=GENEXIS
PKG_LICENSE_URL:=
endef
define Package/$(PKG_NAME)/description
This package contains GeneOS agent.
endef
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) -rf ./src/* $(PKG_BUILD_DIR)/
endef
TARGET_CFLAGS += $(FPIC) -I$(PKG_BUILD_DIR)
define Build/Compile
$(MAKE) -C $(PKG_BUILD_DIR)\
PROJECT_ROOT="$(PKG_BUILD_DIR)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
ARCH="$(LINUX_KARCH)" \
EXTRA_CFLAGS="$(TARGET_CFLAGS)" \
all
endef
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_BIN) ./files/etc/init.d/bridging $(1)/etc/init.d/
$(INSTALL_DATA) ./files/etc/config/bridging $(1)/etc/config/
$(BBFDM_REGISTER_SERVICES) ./bbfdm_service.json $(1) $(PKG_NAME)
$(INSTALL_DIR) $(1)/lib/upgrade/keep.d
# $(INSTALL_BIN) ./files/etc/init.d/dm-agent $(1)/etc/init.d/dm-agent
$(INSTALL_BIN) $(PKG_BUILD_DIR)/dm-agent $(1)/usr/sbin
endef
$(eval $(call BuildPackage,$(PKG_NAME)))

View File

@@ -1,17 +0,0 @@
{
"daemon": {
"enable": "1",
"service_name": "bridgemngr",
"dm-framework": true,
"unified_daemon": false,
"services": [
{
"parent_dm": "Device.",
"object": "Bridging"
}
],
"config": {
"loglevel": "3"
}
}
}

View File

@@ -1,33 +0,0 @@
# L2 filter UCI file
config chain 'qos_output'
option target 'qos_output'
option table 'nat'
option chain 'OUTPUT'
option policy 'RETURN'
config chain 'dscp2pbits'
option target 'dscp2pbits'
option table 'broute'
option chain 'BROUTING'
option policy 'RETURN'
config chain 'qos'
option target 'qos'
option table 'broute'
option chain 'BROUTING'
option policy 'RETURN'
config chain 'prevlanxlate'
option target 'prevlanxlate'
option table 'broute'
option chain 'BROUTING'
option policy 'RETURN'
option append 'false'
config chain 'mcsnooping'
option target 'mcsnooping'
option table 'broute'
option chain 'BROUTING'
option policy 'RETURN'
option append 'false'

View File

@@ -1,94 +0,0 @@
#!/bin/sh /etc/rc.common
# Start after bdmf shell, wanconf, and switch-script but before the network-script
START=20
STOP=10
USE_PROCD=1
. /lib/functions.sh
handle_ebtables_chain() {
local sid="$1"
local table
local chain
local target
local policy
local append
local enabled
local ret
config_get table "$sid" table filter
config_get chain "$sid" chain
config_get policy "$sid" policy RETURN
config_get target "$sid" target
config_get_bool append "$sid" append 1
config_get_bool enabled "$sid" enabled 1
[ "$enabled" = "0" ] && return
[ -z "${chain}" -o -z "${target}" ] && return
if [ "$append" != "0" ]; then
append="-A"
else
append="-I"
fi
ebtables --concurrent -t "$table" -N "$target" -P "$policy" 2> /dev/null
ret=$?
if [ $ret -eq 0 ]; then
ebtables --concurrent -t "$table" ${append} "$chain" -j "$target"
else
ebtables --concurrent -t "$table" -D "$chain" -j "$target"
ebtables --concurrent -t "$table" ${append} "$chain" -j "$target"
fi
}
handle_ebtables_rule() {
local sid="$1"
local table
local chain
local target
local match
local value
local enabled
local ret
config_get table "$sid" table filter
config_get chain "$sid" chain
config_get match "$sid" match
config_get value "$sid" value
config_get target "$sid" target RETURN
config_get_bool append "$sid" append 1
config_get_bool enabled "$sid" enabled 1
[ "$enabled" = "0" ] && return
[ -z "${chain}" -o -z "${target}" ] && return
if [ "$append" != "0" ]; then
append="-A"
else
append="-I"
fi
ebtables --concurrent -t "$table" -D "$chain" ${match} -j "$target" ${value} 2> /dev/null
ebtables --concurrent -t "$table" ${append} "$chain" ${match} -j "$target" ${value}
}
start_service() {
ubus -t 30 wait_for network.device uci
config_load bridging
config_foreach handle_ebtables_chain chain
config_foreach handle_ebtables_rule rule
}
reload_service() {
stop
start
}
service_triggers() {
procd_add_reload_trigger bridging
}

View File

@@ -1,37 +0,0 @@
#
# Copyright (c) 2023 Genexis B.V. All rights reserved.
# This Software and its content are protected by the Dutch Copyright Act
# ('Auteurswet'). All and any copying and distribution of the software
# and its content without authorization by Genexis B.V. is
# prohibited. The prohibition includes every form of reproduction and
# distribution.
#
#
PROG = dm-agent
SRCS = dm_agent.c
OBJS = $(SRCS:.c=.o)
DEPS = $(SRCS:.c=.d)
CC = $(CROSS_COMPILE)gcc
STRIP = $(CROSS_COMPILE)strip
CFLAGS = -Wall -Werror $(EXTRA_CFLAGS)
CFLAGS += -MMD -MP -std=gnu99
LDFLAGS += -ldm -ldmapi -lubus -luci -lubox -ljson-c -lblobmsg_json
all: $(PROG)
$(PROG): $(OBJS)
$(CC) $^ $(LDFLAGS) -o $@
%.o: %.c
$(CC) -c $(CFLAGS) $^ -o $@
clean:
rm -f $(PROG) *.o core $(DEPS)
-include $(DEPS)

File diff suppressed because it is too large Load Diff

View File

@@ -1,75 +0,0 @@
#
# Copyright (c) 2023 Genexis B.V. All rights reserved.
# This Software and its content are protected by the Dutch Copyright Act
# ('Auteurswet'). All and any copying and distribution of the software
# and its content without authorization by Genexis B.V. is
# prohibited. The prohibition includes every form of reproduction and
# distribution.
#
#
include $(TOPDIR)/rules.mk
PKG_NAME:=dm-api
PKG_VERSION:=1.0
PKG_RELEASE:=1
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
PLATFORM_CONFIG:=$(TOPDIR)/.config
AUTO_CONF_H:=$(PKG_BUILD_DIR)/autoconf.h
include $(INCLUDE_DIR)/package.mk
define Package/$(PKG_NAME)
CATEGORY:=Genexis
TITLE:=dm-api
PKG_BUILD_DEPENDS:=datamodels
DEPENDS:=+libsqlite3 \
+libjson-c +libstdcpp +quickjs \
+libubus +libubox +libuci
# Dependencies for RG products
URL:=http://www.genexis.eu
PKG_LICENSE:=GENEXIS
PKG_LICENSE_URL:=
endef
define Package/$(PKG_NAME)/description
This package contains api for the dm-framework
endef
define Build/Prepare
$(CP) -rf ./src/* $(PKG_BUILD_DIR)/
endef
TARGET_CFLAGS += $(FPIC) -I$(PKG_BUILD_DIR)
define Build/Compile
$(MAKE) -C $(PKG_BUILD_DIR)\
PROJECT_ROOT="$(PKG_BUILD_DIR)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
ARCH="$(LINUX_KARCH)" \
EXTRA_CFLAGS="$(TARGET_CFLAGS)" \
all
endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_BUILD_DIR)/core/dm_api.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/core/dm_linker.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/core/dbmgr.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/include/dm_log.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/utils/dm_list.h $(1)/usr/include/
$(CP) $(PKG_BUILD_DIR)/libdmapi.so $(1)/usr/lib/
endef
define Package/$(PKG_NAME)/install
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/sbin/
$(INSTALL_BIN) $(PKG_BUILD_DIR)/libdmapi.so $(1)/usr/lib/
endef
$(eval $(call BuildPackage,$(PKG_NAME)))

View File

@@ -1,64 +0,0 @@
#
# Copyright (c) 2023 Genexis B.V. All rights reserved.
# This Software and its content are protected by the Dutch Copyright Act
# ('Auteurswet'). All and any copying and distribution of the software
# and its content without authorization by Genexis B.V. is
# prohibited. The prohibition includes every form of reproduction and
# distribution.
#
#
PROG = libdmapi.so
SRCS = \
core/dm_api.c \
core/dm_linker.c \
core/dbmgr.c \
core/inode_buf.c \
core/dm_apply.c \
core/dm_import.c \
core/db_upgrade.c \
utils/dm_list.c \
utils/dm_log.c \
utils/ubus_client.c \
utils/utils.c \
quickjs/qjs.c \
quickjs/qjs_log.c \
quickjs/qjs_dm_api.c \
quickjs/qjs_uci_api.c \
quickjs/qjs_ubus_api.c
OBJS = $(SRCS:.c=.o)
DEPS = $(SRCS:.c=.d)
CC = $(CROSS_COMPILE)gcc
STRIP = $(CROSS_COMPILE)strip
CFLAGS += \
-I core \
-I $(STAGING_DIR)/usr/include $(EXTRA_CFLAGS) \
-I handlers/common \
-I handlers/tr181 \
-I include \
-I utils \
-I quickjs
CFLAGS += -MMD -MP -std=gnu99
LDFLAGS = -shared
CFLAGS += -Wall -Werror -fpic
LDFLAGS += -lquickjs -lsqlite3 -latomic
# END for src from mgmt-agent
all: $(PROG)
$(PROG): $(OBJS)
$(CC) $^ $(LDFLAGS) -o $@
%.o: %.c
$(CC) -c $(CFLAGS) $^ -o $@
clean:
rm -f $(PROG) *.o core $(DEPS)
-include $(DEPS)

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#ifndef DB_UPGRADE_H
#define DB_UPGRADE_H
int get_db_user_version(const char *db_path, int *user_version);
int upgrade_db(const char *curr_db, const char *new_db, int new_version);
#endif

View File

@@ -1,684 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#include "dbmgr.h"
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "dm.h"
#include "dm_log.h"
#include "dm_node.h"
#include "sqlite3.h"
#define MAX_SQL_STATEMENT_LEN 4096
static sqlite3 *transaction_db = NULL;
static char sql_statement[MAX_SQL_STATEMENT_LEN];
// sqlite busy handler: SQLITE_BUSY can happen when one process tries to access the db
// (a read outside of a session in our case) while another process is in the middle of committing
// a transaction. The busy time is normally short, within 100 ms.
static int db_busy_handler(void *context, int count)
{
dmlog_debug("db_busy_handler %s, %d", sql_statement, count);
// non-zero means retry..
return 1;
}
static char *_get_node_rel_path_name(const struct dm_node_info *head,
const struct dm_node_info *node)
{
static char _path_name[512] = {'\0'};
if (node != NULL && node != head) {
_get_node_rel_path_name(head, node->parent);
}
if ((node == head) || (node == NULL)) /* end of recursion make sure pathname is the empty string */
{
memset(_path_name, 0, sizeof(_path_name));
} else {
strcat(_path_name, node->name);
strcat(_path_name, "_");
}
return _path_name;
}
static const char *get_node_rel_path_name(const struct dm_node_info *head,
const struct dm_node_info *node)
{
char *name = _get_node_rel_path_name(head, node);
int len = strlen(name);
// remove last '_'
if (len > 0) {
name[len - 1] = '\0';
}
return name;
}
static const char *get_table_field_name(const struct dm_node_info *node)
{
const struct dm_node_info *p = node;
while (p->parent != dm_node_get_root()) {
if (p->type == DM_NODE_OBJECT_LIST) {
break;
}
p = p->parent;
}
return get_node_rel_path_name(p, node);
}
static const char *get_int_str(int i)
{
static char digit[16];
sprintf(digit, "%d", i);
return digit;
}
static void get_db_index_condition(const struct dm_node_info *info, char *buf, const dm_index_t indexes[], int *cnt)
{
if (info == NULL) {
return;
}
if (info != dm_node_get_root()) {
get_db_index_condition(info->parent, buf, indexes, cnt);
}
if (info->type == DM_NODE_OBJECT_LIST && indexes[*cnt] > 0) {
if (*cnt > 0) {
strcat(buf, " AND ");
}
strcat(buf, "\"IndexOf");
strcat(buf, info->name);
strcat(buf, "\"");
strcat(buf, "=");
strcat(buf, get_int_str(indexes[*cnt]));
(*cnt)++;
}
}
static void get_db_index_field(const struct dm_node_info *info, char *buf, int *cnt)
{
if (info == NULL)
return;
if (info != dm_node_get_root())
get_db_index_field(info->parent, buf, cnt);
if (info->type == DM_NODE_OBJECT_LIST) {
if (*cnt > 0) {
strcat(buf, ",");
}
strcat(buf, "\"IndexOf");
strcat(buf, info->name);
strcat(buf, "\"");
(*cnt)++;
}
}
static const char *get_query_sql(const dm_node_t *node, int max_in_list)
{
const struct dm_node_info *info = dm_node_get_info(node->id);
if (info == NULL) {
dmlog_error("get_query_sql, node id not found");
return NULL;
}
strcpy(sql_statement, "select ");
if (dm_node_is_parameter(node->id)) {
if (max_in_list)
strcat(sql_statement, "MAX(");
strcat(sql_statement, "\"");
strcat(sql_statement, get_table_field_name(info));
strcat(sql_statement, "\"");
if (max_in_list)
strcat(sql_statement, ")");
} else if (dm_node_is_objectlist(node->id)) {
const char *node_name = dm_node_name(node->id);
if (node_name == NULL)
return NULL;
strcat(sql_statement, "\"IndexOf");
strcat(sql_statement, node_name);
strcat(sql_statement, "\"");
} else {
return NULL;
}
strcat(sql_statement, " FROM ");
strcat(sql_statement, dm_node_get_table_name(info));
if (node->cnt > 0) {
strcat(sql_statement, " WHERE ");
int cnt = 0;
if (max_in_list) {
// start from the parent of the object.
info = dm_node_get_info(dm_node_id_parent(dm_node_id_parent(node->id)));
}
get_db_index_condition(info, sql_statement, node->index, &cnt);
}
strcat(sql_statement, ";");
return sql_statement;
}
static const char *get_query_all_indexes_sql(const dm_node_t *node, const char *keys, int sort)
{
const struct dm_node_info *info = dm_node_get_info(node->id);
if (info == NULL) {
dmlog_error("get_query_all_indexes_sql, node id not found");
return NULL;
}
if (dm_node_is_counter(node->id)) {
dm_node_id_t counter = dm_node_counter_id(node->id);
if (counter == INVALID_DM_NODE_ID) {
dmlog_error("invalid counter id %d", counter);
return NULL;
}
info = dm_node_get_info(counter);
if (info == NULL) {
dmlog_error("info is NULL");
return NULL;
}
strcpy(sql_statement, "select COUNT(\"IndexOf");
strcat(sql_statement, info->name);
strcat(sql_statement, "\")");
} else {
strcpy(sql_statement, "select");
strcat(sql_statement, " IndexOf");
strcat(sql_statement, info->name);
int index_cnt = dm_node_index_cnt(node->id);
if (index_cnt > 1 && node->cnt < index_cnt - 1) {
const struct dm_node_info *pinfo = dm_node_get_info(dm_node_i_parent_id(node->id));
if (pinfo) {
strcat(sql_statement, ",IndexOf");
strcat(sql_statement, pinfo->name);
} else {
dmlog_error("get_query_all_indexes_sql, invalid node: %s", dm_node_str(node));
}
}
struct dm_object *obj = (struct dm_object *)info;
if (obj->key_param_names != NULL) {
strcat(sql_statement, ",_key");
char *tmp = strdup(obj->key_param_names);
char *key = strtok(tmp, ",");
while (key != NULL) {
strcat(sql_statement, ",\"");
strcat(sql_statement, key);
strcat(sql_statement, "\"");
key = strtok(NULL, ",");
}
free(tmp);
}
}
strcat(sql_statement, " FROM ");
strcat(sql_statement, dm_node_get_table_name(info));
if (dm_node_index_cnt(info->parent->node_id) > 0 && node->cnt > 0) {
strcat(sql_statement, " WHERE ");
int cnt = 0;
get_db_index_condition(info->parent, sql_statement, node->index, &cnt);
if (keys != NULL && keys[0] != '\0') {
strcat(sql_statement, " AND ");
}
} else if (keys != NULL && keys[0] != '\0') {
strcat(sql_statement, " WHERE ");
}
if (keys != NULL && keys[0] != '\0') {
strcat(sql_statement, keys);
}
if (sort && info->flag & FLAG_HAS_ORDER)
strcat(sql_statement, " ORDER BY \"Order\"");
strcat(sql_statement, ";");
return sql_statement;
}
// The SQL standard specifies that single-quotes in strings are escaped
// by putting two single quotes in a row.
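// For example, a value of O'Brien is emitted into the generated statement as 'O''Brien'.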
static void strcat_str_data(char *dest, const char *data)
{
int len = strlen(dest);
for (; *data != '\0'; data++, len++) {
if (*data == '\'') {
dest[len] = '\'';
len++;
dest[len] = '\'';
} else {
dest[len] = *data;
}
}
dest[len] = '\0';
}
static const char *get_update_sql(const dm_node_t *node, const char *data)
{
const struct dm_node_info *info = dm_node_get_info(node->id);
if (info == NULL) {
dmlog_error("get_update_sql, node id not found");
return NULL;
}
strcpy(sql_statement, "UPDATE ");
strcat(sql_statement, dm_node_get_table_name(info));
strcat(sql_statement, " SET \"");
strcat(sql_statement, get_table_field_name(info));
strcat(sql_statement, "\" = ");
strcat(sql_statement, "\'");
if (strchr(data, '\'') != NULL)
// containing single quotes, needs special handling.
strcat_str_data(sql_statement, (const char *)data);
else
strcat(sql_statement, (const char *)data);
strcat(sql_statement, "\'");
if (node->cnt > 0) {
strcat(sql_statement, " WHERE ");
int cnt = 0;
get_db_index_condition(info, sql_statement, node->index, &cnt);
}
strcat(sql_statement, ";");
return sql_statement;
}
static int db_next_index_callback(void *context, int argc, char *argv[], char *cols[])
{
(void)argc;
(void)cols;
int *next_index = (int *)context;
if (argv[0]) {
*next_index = atoi(argv[0]);
}
return 0;
}
static int exec_sql(const char *sql, sqlite_callback callback, void *context)
{
if (transaction_db == NULL) {
dmlog_error("db is not opened");
return -1;
}
char *err_msg = NULL;
int ret = sqlite3_exec(transaction_db, sql, callback, context, &err_msg);
if (ret != SQLITE_OK) {
dmlog_error("SQL command(%s) error: %s", sql, err_msg);
sqlite3_free(err_msg);
return -1;
}
return 0;
}
// sql in transaction
static int exec_trans_sql(const char *sql, sqlite_callback callback, void *context)
{
return exec_sql(sql, callback, context);
}
static int get_object_next_index(const dm_node_t *node)
{
const struct dm_node_info *info = dm_node_get_info(node->id);
if (info == NULL)
return -1;
const struct dm_node_info *p = info->parent;
while (p != dm_node_get_root() && p->parent != dm_node_get_root()) {
if (p->type == DM_NODE_OBJECT_LIST) {
break;
}
p = p->parent;
}
strcpy(sql_statement, "select ");
strcat(sql_statement, "\"NextIndexOf");
strcat(sql_statement, get_node_rel_path_name(p, info));
strcat(sql_statement, "\" FROM ");
if (p->parent == dm_node_get_root()) {
strcat(sql_statement, dm_node_get_root()->name);
strcat(sql_statement, "_");
strcat(sql_statement, p->name);
} else {
const char *tb = dm_node_get_table_name(p);
if (tb == NULL) {
dmlog_error("no db table for node %s", dm_node_str(node));
return -1;
}
strcat(sql_statement, tb);
if (dm_node_index_cnt(node->id) > 1) {
strcat(sql_statement, " WHERE ");
int cnt = 0;
get_db_index_condition(info->parent, sql_statement, node->index, &cnt);
}
}
strcat(sql_statement, ";");
int next_index = -1;
exec_sql(sql_statement, db_next_index_callback, &next_index);
return next_index;
}
static int update_object_next_index(const dm_node_t *node, int next_index)
{
const struct dm_node_info *info = dm_node_get_info(node->id);
if (info == NULL)
return -1;
const struct dm_node_info *p = info->parent;
while (p != dm_node_get_root() && p->parent != dm_node_get_root()) {
if (p->type == DM_NODE_OBJECT_LIST) {
break;
}
p = p->parent;
}
strcpy(sql_statement, "UPDATE ");
if (p->parent == dm_node_get_root()) {
strcat(sql_statement, dm_node_get_root()->name);
strcat(sql_statement, "_");
strcat(sql_statement, p->name);
} else {
strcat(sql_statement, dm_node_get_table_name(p));
}
strcat(sql_statement, " SET \"NextIndexOf");
strcat(sql_statement, get_node_rel_path_name(p, info));
strcat(sql_statement, "\" = ");
strcat(sql_statement, get_int_str(next_index));
if (dm_node_index_cnt(node->id) > 1) {
strcat(sql_statement, " WHERE ");
int cnt = 0;
get_db_index_condition(info->parent, sql_statement, node->index, &cnt);
}
strcat(sql_statement, ";");
return exec_trans_sql(sql_statement, NULL, NULL);
}
static const char *get_insert_sql(const dm_node_t *node, int new_index)
{
int i;
int cnt = 0;
const struct dm_node_info *info = dm_node_get_info(node->id);
if (info == NULL) {
dmlog_error("get_insert_sql, node id not found");
return NULL;
}
strcpy(sql_statement, "insert INTO ");
strcat(sql_statement, dm_node_get_table_name(info));
strcat(sql_statement, " (");
get_db_index_field(info, sql_statement, &cnt);
strcat(sql_statement, ") VALUES(");
cnt = dm_node_index_cnt(node->id);
for (i = 0; i < cnt - 1; i++) {
strcat(sql_statement, get_int_str(node->index[i]));
strcat(sql_statement, ",");
}
strcat(sql_statement, get_int_str(new_index));
strcat(sql_statement, ");");
return sql_statement;
}
static const char *get_delete_sql_condition(const dm_node_t *node)
{
static char condition[256];
const struct dm_node_info *info = dm_node_get_info(node->id);
if (info == NULL) {
dmlog_error("get_delete_sql_condition, node id not found");
return NULL;
}
if (node->cnt > 0) {
strcpy(condition, " WHERE ");
int cnt = 0;
get_db_index_condition(info, condition, node->index, &cnt);
} else {
return NULL;
}
return condition;
}
int dbmgr_init(const char *db_file)
{
int ret = sqlite3_open_v2(db_file, &transaction_db, SQLITE_OPEN_READWRITE, NULL);
if (SQLITE_OK != ret || transaction_db == NULL) {
dmlog_error("sqlite3_open_v2 failed");
return -1;
}
sqlite3_busy_timeout(transaction_db, SQLITE_BUSY_TIMEOUT);
sqlite3_busy_handler(transaction_db, db_busy_handler, NULL);
dmlog_info("dbmgr_init OK");
return 0;
}
int dbmgr_finalize(void)
{
if (transaction_db) {
sqlite3_close(transaction_db);
transaction_db = NULL;
}
return 0;
}
int dbmgr_tranx_begin(void)
{
dmlog_debug("dbmgr_tranx_begin");
return exec_trans_sql("BEGIN;", NULL, NULL);
}
int dbmgr_tranx_revert(void)
{
dmlog_debug("dbmgr_tranx_revert");
return exec_trans_sql("ROLLBACK;", NULL, NULL);
}
int dbmgr_tranx_commit(void)
{
dmlog_debug("dbmgr_tranx_commit");
int ret = exec_trans_sql("COMMIT;", NULL, NULL);
if (ret != 0) {
dmlog_error("dbmgr_tranx_commit, sql COMMIT; failed");
return -1;
}
return 0;
}
static int db_get_data_callback(void *context, int cnt, char *data[], char *names[])
{
if (cnt <= 0 || data[0] == NULL) {
dmlog_debug("db_get_data_callback, no data");
return 0;
}
char **val = context;
*val = strdup(data[0]);
return 0;
}
static int _dbmgr_get(const dm_node_t *node, char **value, int get_max_in_list)
{
const char *sql = get_query_sql(node, get_max_in_list);
if (sql == NULL) {
return -1;
}
return exec_sql(sql, db_get_data_callback, value);
}
int dbmgr_get(const dm_node_t *node, char **value)
{
int retval = _dbmgr_get(node, value, 0);
return retval;
}
int dbmgr_get_child(const dm_node_t *node, const char *child_name, char **value)
{
dm_node_t child_node = *node;
child_node.id = dm_node_get_child_id(node->id, child_name);
return dbmgr_get(&child_node, value);
}
int dbmgr_get_max(const dm_node_t *node, unsigned int *max)
{
char *value = NULL;
if (_dbmgr_get(node, &value, 1) < 0 || value == NULL) {
return -1;
}
*max = atoi(value);
free(value);
return 0;
}
int dbmgr_set(const dm_node_t *node, const char *data)
{
const char *sql = get_update_sql(node, data);
int ret = exec_trans_sql(sql, NULL, NULL);
if (ret != 0)
return -1;
return 0;
}
int dbmgr_set_uint(const dm_node_t *node, unsigned int data)
{
char *str;
asprintf(&str, "%u", data);
int ret = dbmgr_set(node, str);
free(str);
return ret;
}
int dbmgr_get_next_free_index(const dm_node_t *node)
{
return get_object_next_index(node);
}
int dbmgr_add(const dm_node_t *node)
{
int next_index = get_object_next_index(node);
if (next_index <= 0) {
dmlog_error("dbmgr_add Error: next_index is %d", next_index);
return 0;
}
const char *sql = get_insert_sql(node, next_index);
int ret = exec_trans_sql(sql, NULL, NULL);
if (ret != 0)
return -1;
if (update_object_next_index(node, next_index + 1) != 0) {
return 0; // 0 means invalid index
}
return next_index;
}
static int delete_db_instances(const struct dm_node_info *info, const char *conditions)
{
if (info == NULL) {
dmlog_info("delete_db_instances node not found");
return -1;
}
const char *table_name = dm_node_get_table_name(info);
if (info->type == DM_NODE_OBJECT_LIST && table_name) {
strcpy(sql_statement, "DELETE FROM ");
strcat(sql_statement, table_name);
if (conditions != NULL && *conditions != '\0') {
strcat(sql_statement, conditions);
}
strcat(sql_statement, ";");
return exec_trans_sql(sql_statement, NULL, NULL);
}
return 0;
}
int dbmgr_del(const dm_node_t *node)
{
return delete_db_instances(dm_node_get_info(node->id), get_delete_sql_condition(node));
}
int dbmgr_query_indexes(const dm_node_t *node, const char *keys, sqlite_callback cb, void *context, int sort)
{
const char *sql = get_query_all_indexes_sql(node, keys, sort);
return exec_sql(sql, cb, context);
}

View File

@@ -1,39 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#ifndef DBMGR_H
#define DBMGR_H
#include "dm_types.h"
typedef int (*sqlite_callback)(void *, int, char *[], char *[]);
int dbmgr_init(const char *db_file);
int dbmgr_finalize(void);
int dbmgr_tranx_begin(void);
int dbmgr_tranx_revert(void);
int dbmgr_tranx_commit(void);
int dbmgr_get(const dm_node_t *node, char **value);
int dbmgr_get_child(const dm_node_t *node, const char *child_name, char **value);
int dbmgr_get_max(const dm_node_t *node, unsigned int *max);
int dbmgr_set(const dm_node_t *node, const char *data);
int dbmgr_set_uint(const dm_node_t *node, unsigned int data);
int dbmgr_add(const dm_node_t *node);
int dbmgr_del(const dm_node_t *node);
int dbmgr_query_indexes(const dm_node_t *node, const char *keys, sqlite_callback cb, void *context,
int sort);
int dbmgr_get_next_free_index(const dm_node_t *node);
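/* Usage sketch (illustrative only; SOME_NODE_ID stands for a generated node id constant and the db path is a placeholder):
*   dm_node_t node = dm_init_node(SOME_NODE_ID);
*   if (dbmgr_init("/path/to/datamodel.db") == 0) {
*       dbmgr_tranx_begin();
*       if (dbmgr_set(&node, "new-value") == 0)
*           dbmgr_tranx_commit();
*       else
*           dbmgr_tranx_revert();
*       dbmgr_finalize();
*   }
*/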
#endif /* DBMGR_H */

File diff suppressed because it is too large Load Diff

View File

@@ -1,265 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifndef DM_API_H
#define DM_API_H
#include <json-c/json.h>
#include "dm_types.h"
#include "dm_node.h"
struct dm_uci_context {
int id;
const char *savedir;
};
/** Initialize dmapi.
* @return 0 in case of success, or < 0 in case of error
*/
int dmapi_init(const char *service_name);
// Transaction action applied when the session ends
enum TRANX_ACTION {
TRANX_NO_ACTION = 0,
TRANX_ROLLBACK,
TRANX_COMMIT,
TRANX_COMMIT_AND_APPLY
};
/** Start a session.
* Only one session can be started and active
* While starting the session the lock is taken to avoid race conditions
* @pre dmapi_init should be called successfully
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_session_start();
/** End a session.
* Only one session can be started and active
* @pre dmapi_session_start should be called successfully
* @param action[in] one of the actions defined in TRANX_ACTION
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_session_end(int action);
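/* Usage sketch of an explicit session (illustrative only; `node` is assumed to have been prepared earlier):
*   if (dmapi_session_start() == 0) {
*       dmapi_param_set(&node, "value");
*       dmapi_session_end(TRANX_COMMIT_AND_APPLY); // or TRANX_ROLLBACK to discard the changes
*   }
*/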
/** Apply changes in a session.
* @pre dmapi_session_start should be called successfully
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_session_apply(void);
/** Commits a transaction in a session.
* @pre dmapi_session_start should be called successfully
* and have modified parameters or objects by calling
* dmapi_xxx_set/add/del
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_session_commit(void);
/** Revert changes made in current session.
* @pre dmapi_session_start should be called successfully
* and have modified parameters or objects by calling
* dmapi_xxx_set/add/del
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_session_revert(void);
/** Check if session in the dmapi context is valid and active.
* @pre dmapi_init should be called successfully
* @return 1 if active, or 0 when invalid or not started
*/
int dmapi_in_session(void);
/** Delete a dmapi communication context.
* @pre dmapi_init should be called successfully
* @return none
*/
void dmapi_quit(void);
/** Enable or disable feature of "session on the fly"
* With this feature enabled, a session is started automatically when the set/add/del APIs are called,
* so you don't have to call dmapi_session_start explicitly before modifying data models,
* but you still need to end the session explicitly by calling dmapi_session_end.
* @param enable: 0: disable, 1: enable
* @pre dmapi_init should be called successfully
* @return none
*/
void dmapi_set_session_on_fly(int enable);
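/* Illustrative sketch with "session on the fly" enabled (`node` assumed prepared earlier):
*   dmapi_set_session_on_fly(1);
*   dmapi_param_set(&node, "value");           // implicitly opens the session
*   dmapi_session_end(TRANX_COMMIT_AND_APPLY); // still has to be ended explicitly
*/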
/** Set UCI savedir
* @param savedir: UCI savedir
* @pre dmapi_init should be called successfully
* @return none
*/
void dmapi_set_uci_savedir(const char *savedir);
/** Enable or disable feature of "Auto reference deletion" when deleting objects
* this feature should not be enabled by the cwmp client.
* @param enable: 0: disable, 1: enable
* @pre dmapi_init should be called successfully
* @return none
*/
void dmapi_set_dm_auto_del(int enable);
/** Get parameter value of specific parameter node.
*
* @pre dmapi_init should be called successfully
*
* @param[in] node - pointer to parameter node
* @param[out] value - pointer to pointer to save the result, the value must be freed after use.
*
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_param_get(const dm_node_t *node, char **value);
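/* Illustrative sketch (`node` assumed prepared earlier):
*   char *value = NULL;
*   if (dmapi_param_get(&node, &value) == 0 && value != NULL) {
*       // ... use value ...
*       free(value);
*   }
*/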
/** Set parameter value of specific parameter node.
*
* @pre dmapi_session_start should be called successfully
*
* @param[in] node - pointer to parameter node
* @param[in] param_data - pointer to string parameter value to set
*
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_param_set(const dm_node_t *node, const char *param_data);
/** Add specific object node.
*
* @pre dmapi_session_start should be called successfully
*
* @param[in] node - pointer to object node, the index of the node will be updated upon success
*
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_object_add(dm_node_t *node);
/** Delete specific object node.
*
* @pre dmapi_session_start should be called successfully
*
* @param[in] node - pointer to object node
*
* @return 0 in case of success, or a nonzero value in case of error
*/
int dmapi_object_del(const dm_node_t *node);
/** Operate on the command node.
*
* @param[in] node - pointer to the command node
* @param[in] json_input - input arguments of the command as a JSON string
* @param[out] json_output - output of the command as a json_object
*
* @return 0 in case of success, -1 in case of error
*/
int dmapi_operate(const dm_node_t *node, const char *json_input, struct json_object **json_output);
/** Check if an object instance exists.
* @param[in] node - node as input
* @param[in] db_only - nonzero to check only instances stored in the database
* @return 1 if it exists, otherwise return 0
*/
int dmapi_node_exist(const dm_node_t *node, int db_only);
typedef void *dm_nodelist_h;
#define DM_INVALID_NODELIST ((dm_nodelist_h)NULL)
// macro to iterate the node list
#define nodelist_for_each_node(node, list) \
for ((node) = dm_nodelist_first((list)); (node) != NULL; (node) = dm_nodelist_next((list)))
/** Get all instances of a multi-object node, node must be a multi-object type.
* @param node[in] pointer to multi-object node as input
* @return handle of instances list in case of success, NULL value in case of error
*/
dm_nodelist_h dm_nodelist_get(const dm_node_t *node);
/** Same as dm_nodelist_get(), but returns only instances stored in the database. */
dm_nodelist_h dm_nodelist_get_db(const dm_node_t *node);
/** Find instances of a multi-object node matching a condition; node must be a multi-object type.
* @param node[in] pointer to multi-object node as input
* @param keys[in] SQL-like filter condition, e.g. "<ParamName>='<value>'"
* @param only_db[in] nonzero to search only instances stored in the database.
* @return handle of instances list in case of success, NULL value in case of error
*/
dm_nodelist_h dm_nodelist_find(const dm_node_t *node,
const char *keys, int only_db);
/** Wrapper around dm_nodelist_find that returns only the first matching node.
* If more than one instance matches, the first one is used.
* @param node[in] pointer to multi-object node as input
* @param keys[in] SQL-like filter condition
* @param result[out] receives the first matching node
* @param only_db[in] nonzero to search only instances stored in the database.
* @return 0 if not found, or a nonzero value when found
*/
int dm_nodelist_find_first(const dm_node_t *node,
const char *keys, dm_node_t *result, int only_db);
/** Free a node list.
* @pre dm_nodelist_get or dm_nodelist_find should be called successfully
* @param list[in] handler of node list as input
* @return none
*/
void dm_nodelist_free(dm_nodelist_h list);
/** Get first instance of node instance list.
* @pre dm_nodelist_get or dm_nodelist_find should be called successfully
* @param list[in] handler of node list as input
* @return node pointer on success, NULL when node does not exist
*/
const dm_node_t *dm_nodelist_first(dm_nodelist_h list);
/** Get next instance of node instance list.
* @pre dm_nodelist_get or dm_nodelist_find should be called successfully
* @param list[in] handler of node list as input
* @return node pointer on success, NULL when node does not exist
*/
const dm_node_t *dm_nodelist_next(dm_nodelist_h list);
/** Get count of instance in the list.
* @pre dm_nodelist_get or dm_nodelist_find should be called successfully
* @param[in] list handler of node list as input
* @return number of instances
*/
int dm_nodelist_cnt(dm_nodelist_h list);
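/* Iteration sketch (illustrative only: the object path, the filter condition and
* dm_path2node/dm_node_str from the node API are assumptions):
*
*   dm_node_t obj;
*   if (dm_path2node("Device.IP.Interface.", &obj) == 0) {
*       dm_nodelist_h list = dm_nodelist_find(&obj, "Enable='1'", 0);
*       if (list != DM_INVALID_NODELIST) {
*           printf("%d matching instances\n", dm_nodelist_cnt(list));
*           const dm_node_t *n = NULL;
*           nodelist_for_each_node(n, list)
*               printf("  %s\n", dm_node_str(n));
*           dm_nodelist_free(list);
*       }
*   }
*/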
/** Get a node from the list by index; valid indexes range from 0 to (count - 1).
* @pre dm_nodelist_get or dm_nodelist_find should be called successfully
* @param list_h[in] handler of node list as input
* @param i[in] index value as input
* @return node pointer
*/
const dm_node_t *dm_nodelist_node(dm_nodelist_h list_h, int i);
/** Get the data-model index of a node by its position in the list; valid positions range from 0 to (count - 1).
* @pre dm_nodelist_get or dm_nodelist_find should be called successfully
* @param list_h[in] handler of node list as input
* @param i[in] index value as input
* @return index of node
*/
dm_index_t dm_nodelist_index(dm_nodelist_h list_h, int i);
/** Get the value of the named key parameter (e.g. "_key") of the i-th node in the list. */
const char *dm_nodelist_key(dm_nodelist_h list_h, int i, const char *key);
/** Dump the internal buffer of the given node (debug helper). */
void dmapi_dump_node_buffer(const dm_node_t *node);
/** Handle a ubus event (given as a JSON string) for the node id and return the result as a json_object. */
int dmapi_handle_ubus_event(dm_node_id_t id, const char *json_event_str, struct json_object **res);
#ifdef __cplusplus
}
#endif
#endif /* dmapi_H */


@@ -1,725 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#include "dm_apply.h"
#include <quickjs/quickjs-libc.h>
#include <stdlib.h>
#include <string.h>
#include "dbmgr.h"
#include "dm_api.h"
#include "dm_list.h"
#include "dm_log.h"
#include "dm_node.h"
#include "qjs_uci_api.h"
#include "qjs.h"
#include "ubus_client.h"
typedef struct {
const char* key;
int order;
} key_order_pair;
// datamodel change list during the session
static dm_list_h changed_node_list = NULL;
static dm_list_h apply_uci_list = NULL;
void add_apply_package(char *package)
{
if (!package) {
return;
}
if (apply_uci_list == NULL) {
apply_uci_list = dm_list_create(NULL);
}
// add the uci package name to the apply list
if (!dm_list_contains(apply_uci_list, (const void*)package)) {
dmlog_debug("add uci package to apply: %s", package);
dm_list_append(apply_uci_list, package);
} else {
free(package);
}
}
void del_apply_package(char *package)
{
if (!package || !apply_uci_list) {
return;
}
dmlog_debug("remove uci package to apply: %s", package);
dm_list_remove(apply_uci_list, package);
}
void dm_apply_reset_changes(void)
{
if (changed_node_list) {
dm_list_free(changed_node_list);
changed_node_list = NULL;
}
if (apply_uci_list) {
dm_list_free(apply_uci_list);
apply_uci_list = NULL;
}
}
static int node_change_cmp(const void *change1, const void *change2)
{
struct node_change *n1 = (struct node_change *)change1;
struct node_change *n2 = (struct node_change *)change2;
return !(dm_node_compatible(&n1->node, &n2->node) && (n1->redirected == n2->redirected));
}
static dm_node_id_t find_parent_with_apply_handler(const struct dm_node_info *node_info)
{
if (node_info == NULL) {
return INVALID_DM_NODE_ID;
}
if (qjs_has_apply_handler(node_info->node_id)) {
return node_info->node_id;
}
return find_parent_with_apply_handler(node_info->parent);
}
int dm_appy_add_change(enum DATAMODEL_ACTION action, const dm_node_t *node)
{
dmlog_debug("add change %s", dm_node_str(node));
if (changed_node_list == NULL) {
changed_node_list = dm_list_create(node_change_cmp);
}
struct node_change *pchange = calloc(1, sizeof(struct node_change));
pchange->action = action;
pchange->node = *node;
const struct dm_node_info * info = dm_node_get_info(node->id);
const char *uci_map;
const struct dm_parameter *param = NULL;
// special handling for "Order" parameter
if (info->type == DM_NODE_PARAMETER && info->flag & FLAG_HAS_ORDER) {
// Order parameter,
param = (const struct dm_parameter *)info;
const struct dm_object *pobj = dm_node_get_object(dm_node_i_parent_id(node->id));
if (param->map.map == NULL && pobj->map.map) {
pchange->node.cnt--;
goto end;
}
}
if (info->type == DM_NODE_PARAMETER) {
param = (const struct dm_parameter *)info;
uci_map = param->map.map;
} else {
const struct dm_object *obj = (const struct dm_object *)info;
uci_map = obj->map.map;
}
if (uci_map == NULL && !qjs_has_apply_handler(node->id)) {
// look for parent handler
const dm_node_id_t pid = dm_node_i_parent_id(node->id);
if (pid != INVALID_DM_NODE_ID) {
const struct dm_object *pobj = dm_node_get_object(pid);
if (pobj && pobj->map.map) {
if (param && param->data_type == DM_PATH_NAME && param->data.paths &&
dm_node_get_info(param->data.paths[0])->depends_node_id == pid) {
goto end;
}
dmlog_debug("skip apply for %s", dm_node_str(node));
free(pchange);
return 0;
}
}
dm_node_id_t id = find_parent_with_apply_handler(info->parent);
if (id == INVALID_DM_NODE_ID) {
dmlog_debug("ignored node change for apply %s", dm_node_str(node));
free(pchange);
return 0;
}
pchange->node.id = id;
pchange->node.cnt = dm_node_index_cnt(id);
}
dm_node_id_t depend = dm_node_get_apply_depends(pchange->node.id);
if (depend != INVALID_DM_NODE_ID) {
pchange->node.id = depend;
pchange->redirected = 1;
}
end:
if (dm_list_contains(changed_node_list, (const void*)pchange)) {
free(pchange);
return 0;
}
if (dm_node_is_objectlist(pchange->node.id)) {
pchange->node.cnt = dm_node_index_cnt(pchange->node.id) - 1;
}
dm_list_append(changed_node_list, pchange);
dmlog_debug("added node change %s", dm_node_str(&pchange->node));
return 0;
}
static char *get_package_name(const char *uci_path)
{
char *dot_position = strchr(uci_path, '.');
if (dot_position == NULL) {
dmlog_error("missing dot in the uci_path: %s", uci_path);
return NULL;
}
int length = dot_position - uci_path;
char *result = (char *)malloc(length + 1); // +1 for the null-terminator
strncpy(result, uci_path, length);
result[length] = '\0';
return result;
}
static char *get_package_type(const char *uci_path)
{
char *dot_position = strchr(uci_path, '.');
if (dot_position == NULL) {
dmlog_error("missing dot in the uci_path: %s", uci_path);
return NULL;
}
size_t length = strlen(dot_position + 1) + 1;
char *substring = malloc(length);
strncpy(substring, dot_position + 1, length);
substring[length - 1] = '\0';
return substring;
}
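/* Worked example (value taken from a typical uci map): for map->map == "network.interface",
* get_package_name() returns "network" and get_package_type() returns "interface";
* both strings are malloc-ed and must be freed by the caller. */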
static dm_nodelist_h find_refer_instances(const dm_node_t *node, const dm_node_t *ref_node)
{
dm_path_t path;
dm_node2name(ref_node, path, sizeof(dm_path_t));
const struct dm_object *obj = dm_node_get_object(node->id);
// find the parameter that refers to the "ref_node"
for (int i = 0; i < obj->param_num; i++) {
const struct dm_parameter *param = (const struct dm_parameter*)obj->param_list[i];
if (param->data_type == DM_PATH_NAME && param->data.paths[0] == ref_node->id) {
const struct dm_node_info *param_info = dm_node_get_info(param->node.node_id);
char *search = NULL;
asprintf(&search, "%s='%s'", param_info->name, path);
dmlog_debug("search key: %s", search);
dm_nodelist_h res = dm_nodelist_find(node, search, 1);
free(search);
return res;
}
}
return DM_INVALID_NODELIST;
}
static void apply_param_uci_map(const dm_node_t *node, const dm_node_t *parent, const struct dm_uci_map *map)
{
dmlog_debug("apply_param_uci_map: %s", dm_node_str(node));
char *val = NULL;
if (dbmgr_get(node, &val) < 0 || val == NULL) {
return;
}
char *uci_path = NULL;
char *uci_pkg = NULL;
if (map->map) {
if (parent && strchr(map->map, '.') == NULL) {
char *uci_key = NULL;
dm_node_t key_node;
const struct dm_object *obj = dm_node_get_object(parent->id);
if (dm_node_get_child(parent, "_key", &key_node) < 0) {
dmlog_error("failed to get _key %s", dm_node_str(parent));
goto end;
}
if (dbmgr_get(&key_node, &uci_key) < 0 || uci_key == NULL) {
dmlog_error("failed to call dbmgr_get %s", dm_node_str(&key_node));
goto end;
}
if (obj->map.map == NULL) {
dmlog_error("missing uci map %s", dm_node_str(parent));
free(uci_key);
goto end;
}
uci_pkg = get_package_name(obj->map.map);
asprintf(&uci_path, "%s.%s.%s", uci_pkg, uci_key, map->map);
free(uci_key);
} else {
uci_path = strdup(map->map);
uci_pkg = get_package_name(map->map);
}
if (map->type == DM_UCI_MAP_TYPE_SIMPLE || map->type == DM_UCI_MAP_TYPE_INTERFACE) {
if (dm_node_data_type(node->id) == DM_PATH_NAME) {
// set the uci option to the section name (_key) of the referenced path name
if (val[0] == '\0') {
dm_uci_set(uci_path, val);
} else {
char *key = NULL;
dm_node_t ref_node;
dm_path2node(val, &ref_node);
dbmgr_get_child(&ref_node, "_key", &key);
if (key != NULL) {
dm_uci_set(uci_path, key);
free(key);
} else {
dmlog_error("failed to get key of node %s", val);
}
}
} else {
if (dm_node_is_bool_type(node->id)) {
if (strcmp(val, "1") == 0 || strcmp(val, "true") == 0)
dm_uci_set(uci_path, "1");
else
dm_uci_set(uci_path, "0");
} else {
dm_uci_set(uci_path, val);
}
}
} else if (map->type == DM_UCI_MAP_TYPE_DISABLE) {
if (strcmp(val, "1") == 0 || strcmp(val, "true") == 0)
dm_uci_set(uci_path, "0");
else
dm_uci_set(uci_path, "1");
} else {
dmlog_debug("not support apply uci type: %s", dm_node_str(node));
}
free(uci_path);
}
qjs_call_apply_param_handler(node, val);
add_apply_package(uci_pkg);
end:
free(val);
return;
}
struct dm_inst_key {
const char *key;
dm_node_t node;
};
static int dm_inst_list_cmp(const void *item1, const void *item2)
{
struct dm_inst_key *inst1 = (struct dm_inst_key *)item1;
struct dm_inst_key *inst2 = (struct dm_inst_key *)item2;
if (strcmp((const char*)inst1->key, (const char*)inst2->key) == 0) {
return 0;
}
return 1;
}
static void free_dm_inst_list(dm_list_h list)
{
int cnt = dm_list_cnt(list);
for (int i = 0; i < cnt; i++) {
struct dm_inst_key *inst = dm_list_get(list, i);
free((char *)inst->key);
}
dm_list_free(list);
}
static dm_list_h get_obj_key_list(dm_nodelist_h list)
{
const dm_node_t *n = NULL;
dm_list_h key_list = dm_list_create(dm_inst_list_cmp);
nodelist_for_each_node(n, list)
{
char *key = NULL;
if (dbmgr_get_child(n, "_key", &key) == 0 && key != NULL) {
struct dm_inst_key *ins = malloc(sizeof(struct dm_inst_key));
ins->key = key;
ins->node = *n;
dm_list_append(key_list, ins);
} else {
dmlog_error("failed to get key for node: %s", dm_node_str(n));
}
}
return key_list;
}
static int apply_obj_uci_map(const dm_node_t *node, const struct dm_uci_map *map)
{
int ret = 0;
char *pkg_name = get_package_name(map->map);
char *type = get_package_type(map->map);
dm_nodelist_h list = dm_nodelist_get_db(node);
dm_list_h keys = get_obj_key_list(list);
dm_list_h sect_list = dm_list_create(NULL);
char *uci_key = NULL;
if (map->key != NULL && dm_node_index_cnt(node->id) > 1) {
dm_node_t parent;
dm_node_i_parent(node, &parent);
dbmgr_get_child(&parent, "_key", &uci_key);
}
json_object *json_list = NULL;
if (dm_uci_get_section_list(pkg_name, type, NULL, 0, &json_list) == 0 && json_list != NULL) {
json_object_object_foreach(json_list, key, val) {
if (!qjs_uci_filter(node, val)) {
continue;
}
const char *key_val = NULL;
if (map->key != NULL) {
json_object *val_obj;
if (json_object_object_get_ex(val, map->key, &val_obj)) {
key_val = json_object_get_string(val_obj);
}
}
if (uci_key && strcmp(uci_key, key_val) != 0) {
continue;
}
struct dm_inst_key inst;
inst.key = key;
if (!dm_list_contains(keys, &inst)) {
// special handling for user: set "deleted" instead of deleting
if (strcmp(pkg_name, "users") == 0 && strcmp(type, "user") == 0) {
char *tmp;
asprintf(&tmp, "%s.%s.deleted", pkg_name, key);
dm_uci_set(tmp, "1");
free(tmp);
} else {
dmlog_debug("uci-map: del uci: %s.%s", pkg_name, key);
if (qjs_call_uci_deinit_handler(node, key) == 0) {
dm_uci_del(pkg_name, key);
} else {
dmlog_debug("uci-map: skipped deleting %s", key);
}
}
} else {
dm_list_append(sect_list, strdup(key));
}
}
json_object_put(json_list);
}
int key_cnt = dm_list_cnt(keys);
for (int i = 0; i < key_cnt; i++) {
struct dm_inst_key *inst = dm_list_get(keys, i);
if (!dm_list_contains(sect_list, (const void*)inst->key)) {
// add section for the new instance
dmlog_debug("uci-map: add uci: %s.%s", pkg_name, type);
name_val_t opt_val;
int opt_cnt = 0;
if (uci_key != NULL && map->key) {
opt_val.name = map->key;
opt_val.value = uci_key;
opt_cnt = 1;
}
if (dm_uci_add(pkg_name, type, inst->key, &opt_val, opt_cnt) < 0) {
dmlog_error("failed to add new uci section");
ret = -1;
}
qjs_call_uci_init_handler(&inst->node);
}
}
if (pkg_name) {
add_apply_package(pkg_name);
pkg_name = NULL;
}
free(type);
if (pkg_name) {
free(pkg_name);
}
if (uci_key) {
free(uci_key);
}
dm_nodelist_free(list);
free_dm_inst_list(keys);
dm_list_free(sect_list);
return ret;
}
static int compare_order(const void* a, const void* b) {
key_order_pair* pair_a = (key_order_pair*)a;
key_order_pair* pair_b = (key_order_pair*)b;
return pair_a->order - pair_b->order;
}
static void reorder_uci_sections(const char *pkg, key_order_pair *pairs, int cnt)
{
qsort(pairs, cnt, sizeof(key_order_pair), compare_order);
struct blob_buf b;
memset(&b, 0, sizeof(b));
blob_buf_init(&b, 0);
blobmsg_add_string(&b, "config", pkg);
void *a = blobmsg_open_array(&b, "sections");
for (int i = 0; i < cnt; ++i) {
blobmsg_add_string(&b, NULL, pairs[i].key);
}
blobmsg_close_array(&b, a);
ubus_client_call("uci", "order", b.head, NULL, NULL);
blob_buf_free(&b);
add_apply_package((char*)pkg);
}
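/* The blob built above corresponds to a ubus request of the following shape
* (the config name and section names are made up for illustration; in practice
* the sections are the "_key" values of the instances, in "Order" order):
*
*   ubus call uci order '{ "config": "firewall", "sections": [ "cfg01", "cfg02", "cfg03" ] }'
*/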
// reorder the uci sections according to the "Order" value
static void apply_order(const dm_node_t *node, const char *uci_map)
{
dmlog_info("apply_order, %s, %s", dm_node_str(node), uci_map);
dm_nodelist_h list = dm_nodelist_get_db(node);
int cnt = dm_nodelist_cnt(list);
if (cnt <= 1) {
dm_nodelist_free(list);
return;
}
key_order_pair* pairs = calloc(cnt, sizeof(key_order_pair));
for (int i = 0; i < cnt; i++) {
char* order_str = NULL;
dbmgr_get_child(dm_nodelist_node(list, i), "Order", &order_str);
if (order_str == NULL) {
dmlog_error("apply_order, failed to get Order");
goto exit;
}
pairs[i].order = atoi(order_str);
free(order_str);
pairs[i].key = dm_nodelist_key(list, i, "_key");
if (pairs[i].key == NULL) {
dmlog_error("apply_order, unexpected empty key value");
goto exit;
}
}
char *pkg = get_package_name(uci_map);
reorder_uci_sections(pkg, pairs, cnt);
exit:
for (int i = 0; i < cnt; i++) {
if (pairs[i].key) {
JS_FreeCString(qjs_ctx(), pairs[i].key);
}
}
free(pairs);
dm_nodelist_free(list);
}
static void apply_extended_obj(const dm_node_t *node, const char *path, dm_node_id_t id)
{
dmlog_debug("apply_extended_obj: %s, %s", dm_node_str(node), path);
dm_node_t ref_node;
if (path == NULL) {
return;
}
if (path[0] == '\0') {
// call the deinit handler for the extended object
dm_node_t n = {id};
char *uci_key = NULL;
dbmgr_get_child(node, "_key", &uci_key);
if (uci_key) {
qjs_call_uci_deinit_handler(&n, uci_key);
free(uci_key);
}
return;
}
if (dm_path2node(path, &ref_node) != 0) {
dmlog_error("apply_order, invalid path: %s", path);
return;
}
const struct dm_object *obj = dm_node_get_object(ref_node.id);
for (int i = 0; i < obj->param_num; i++) {
const struct dm_parameter *param = (const struct dm_parameter *)obj->param_list[i];
if (param->node.table_name == NULL) {
continue;
}
if (param->map.map == NULL && !qjs_has_apply_handler(node->id)) {
continue;
}
dm_node_t n = ref_node;
n.id = obj->param_list[i]->node_id;
apply_param_uci_map(&n, node, &param->map);
}
}
void dm_apply_node(struct node_change *change)
{
const dm_node_t *node = &change->node;
dm_node_t *pparent = NULL;
const struct dm_object *parent_obj = NULL;
dm_node_t parent_node;
if (dm_node_i_parent(node, &parent_node) == 0) {
pparent = &parent_node;
parent_obj = dm_node_get_object(parent_node.id);
}
if (parent_obj && parent_obj->map.map && parent_obj->node.depends_node_id != INVALID_DM_NODE_ID) {
// apply to the referenced object that it "extends"
dm_node_t refer_node = parent_node;
refer_node.id = parent_obj->node.depends_node_id;
refer_node.cnt--;
dm_nodelist_h res = find_refer_instances(&refer_node, &parent_node);
const dm_node_t *pnode = NULL;
nodelist_for_each_node(pnode, res) {
if (qjs_has_apply_handler(node->id)) {
qjs_call_apply_obj_handler(node, pnode);
}
else if (dm_node_is_parameter(node->id)) {
const struct dm_parameter *param = dm_node_get_parameter(node->id);
apply_param_uci_map(node, pnode, &param->map);
}
}
dm_nodelist_free(res);
return;
}
if (dm_node_is_parameter(node->id)) {
const struct dm_parameter *param = dm_node_get_parameter(node->id);
if (param->data_type == DM_PATH_NAME && param->data.paths) {
if (pparent && dm_node_get_info(param->data.paths[0])->depends_node_id == pparent->id) {
char *val = NULL;
dbmgr_get(node, &val);
apply_extended_obj(pparent, val, param->data.paths[0]);
return;
}
}
if (param->map.map != NULL || qjs_has_apply_handler(node->id)) {
apply_param_uci_map(node, pparent, &param->map);
if (param->node.flag & FLAG_HAS_ORDER) {
const struct dm_object *pobj = dm_node_get_object(dm_node_i_parent_id(node->id));
if (pobj && pobj->map.map) {
dm_node_t pnode;
dm_node_i_parent(node, &pnode);
apply_order(&pnode, pobj->map.map);
}
}
}
} else if (dm_node_is_objectlist(node->id)) {
const struct dm_object *obj = dm_node_get_object(node->id);
if (obj->map.map != NULL && obj->map.type == DM_UCI_MAP_TYPE_SIMPLE ) {
if (obj->node.depends_node_id == INVALID_DM_NODE_ID) {
apply_obj_uci_map(node, &obj->map);
}
} else if (qjs_has_apply_handler(node->id)) {
qjs_call_apply_obj_handler(node, node);
}
} else if (qjs_has_apply_handler(node->id)) {
qjs_call_apply_obj_handler(node, node);
} else {
dmlog_error("dm_apply_node, node is not handled: %s", dm_node_str(node));
}
}
static int commit_changes()
{
int ret = 0;
if (changed_node_list == NULL)
return 0;
int i;
int cnt = dm_list_cnt(changed_node_list);
for (i = 0; i < cnt; i++) {
struct node_change *change = (struct node_change *)dm_list_get(changed_node_list, i);
dmlog_debug("apply node: %s", dm_node_str(&change->node));
if (change->redirected) {
qjs_call_apply_obj_handler(&change->node, &change->node);
} else {
dm_apply_node(change);
}
dmlog_debug("end of apply node: %s", dm_node_str(&change->node));
}
return ret;
}
// reset parameters that are confidential in db.
static void reset_confidentials()
{
if (changed_node_list == NULL)
return;
int i;
// dbmgr_tranx_begin();
int cnt = dm_list_cnt(changed_node_list);
for (i = 0; i < cnt; i++) {
struct node_change *change = (struct node_change *)dm_list_get(changed_node_list, i);
if (dm_node_is_confidential(change->node.id)) {
if (dbmgr_set(&change->node, "") < 0) {
dmlog_error("failed to reset confidentail parameter: %s", dm_node_str(&change->node));
}
}
}
// dbmgr_tranx_commit();
}
int dm_apply_reset(void)
{
dm_apply_reset_changes();
return 0;
}
// static int reload_service(const char *svc)
// {
// struct blob_buf bb = {};
// if (svc == NULL) {
// return 0;
// }
// blob_buf_init(&bb, 0);
// blobmsg_add_string(&bb, "config", svc);
// int ret = ubus_client_call("uci", "commit", bb.head, NULL, NULL);
// if (ret != 0) {
// dmlog_error("failed to reload service %s", svc);
// } else {
// dmlog_info("reloaded service %s", svc);
// }
// blob_buf_free(&bb);
// return ret;
// }
int dm_apply_do_apply()
{
// int i;
commit_changes();
// the commit will be done in the bbf_config daemon
// int cnt = dm_list_cnt(apply_uci_list);
// for (i = 0; i < cnt; i++) {
// char *pkg_name = dm_list_get(apply_uci_list, i);
// dm_uci_commit(pkg_name);
// }
// cnt = dm_list_cnt(apply_uci_list);
// for (i = 0; i < cnt; i++) {
// char *pkg_name = dm_list_get(apply_uci_list, i);
// reload_service(pkg_name);
// }
reset_confidentials();
dm_apply_reset();
return 0;
}


@@ -1,37 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#ifndef DM_APPLY_H
#define DM_APPLY_H
#include <quickjs/quickjs.h>
#include "dm_node.h"
enum DATAMODEL_ACTION {
DATA_MODEL_ADD,
DATA_MODEL_SET,
DATA_MODEL_DELETE
};
struct node_change {
dm_node_t node;
enum DATAMODEL_ACTION action;
int redirected;
};
int dm_appy_add_change(enum DATAMODEL_ACTION action, const dm_node_t *node);
void dm_apply_reset_changes(void);
int dm_apply_reset(void);
int dm_apply_do_apply();
void dm_apply_node(struct node_change *change);
void add_apply_package(char *package);
#endif /* DM_APPLY_H */


@@ -1,373 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "qjs_uci_api.h"
#include "dbmgr.h"
#include "dm.h"
#include "dm_log.h"
#include "dm_node.h"
#include "ubus_client.h"
#include "dm_api.h"
#include "qjs.h"
#include "dm_list.h"
static char* get_tr181_pathname(const char *pn, const char *uci_path, const char *uci_val)
{
json_object *values;
if (uci_val == NULL || uci_val[0] == '\0') {
return NULL;
}
char *path = strdup(uci_path);
char *config = strtok(path, ".");
char *type = strtok(NULL, ".");
if (dm_uci_get_section_list(config, type, NULL, 0, &values) < 0 || values == NULL) {
dmlog_error("get_tr181_pathname, dm_uci_get_section_list failed");
free(path);
return NULL;
}
free(path);
int is_network_interface = 0;
dm_list_h itf_dev_list = NULL;
// special handling for network interface
if (strcmp(uci_path, "network.interface") == 0) {
is_network_interface = 1;
itf_dev_list = dm_list_create(NULL);
}
int index = 1;
// Iterate over all keys in the 'values' object
json_object_object_foreach(values, key, value) {
(void)key;
struct json_object *name_value = NULL;
struct json_object *anonymous = NULL;
if (json_object_object_get_ex(value, ".anonymous", &anonymous)
&& json_object_get_boolean(anonymous)) {
json_object_object_get_ex(value, "name", &name_value);
} else {
json_object_object_get_ex(value, ".name", &name_value);
}
if (is_network_interface) {
// special handling for network interface: skip the loopback and dhcpv6 interfaces.
struct json_object *dev_value;
if (json_object_object_get_ex(value, "device", &dev_value)) {
const char *dev_name = json_object_get_string(dev_value);
if (strcmp(dev_name, "lo") == 0) {
continue;
}
if (dm_list_contains(itf_dev_list, (const void*)dev_name)) {
continue;
}
dm_list_append(itf_dev_list, strdup(dev_name));
}
}
if (strcmp(json_object_get_string(name_value), uci_val) == 0) {
// Found the object with the given name, return its index
char *ret_val = NULL;
asprintf(&ret_val, "%s%d", pn, index);
json_object_put(values);
return ret_val;
} else {
index++;
}
}
if (is_network_interface) {
dm_list_free(itf_dev_list);
}
json_object_put(values);
return NULL;
}
static int import_pathname(const dm_node_t *node, char *uci_value)
{
const struct dm_parameter * param = dm_node_get_parameter(node->id);
if (param->data.paths) {
dm_node_t ref_node = *node;
ref_node.id = param->data.paths[0];
const struct dm_object *obj = dm_node_get_object(ref_node.id);
if (obj && obj->map.map) {
dm_path_t pathname;
ref_node.cnt = dm_node_index_cnt(ref_node.id) - 1;
if (dm_node2name(&ref_node, pathname, sizeof(pathname)) < 0) {
dmlog_error("import_pathname, invalid node");
return -1;
}
// trim the last {i}.
int len = strlen(pathname);
pathname[len - 4] = '\0';
char *pn = get_tr181_pathname(pathname, obj->map.map, uci_value);
if (pn) {
int ret = dmapi_param_set(node, pn);
free(pn);
return ret;
} else {
dmlog_error("import_pathname, get_tr181_pathname failed");
return -1;
}
}
}
return -1;
}
static int importParam(const dm_node_t *node, const struct dm_uci_map *map)
{
char * val = NULL;
if (map->type == DM_UCI_MAP_TYPE_JS) {
JSValue result = qjs_eval_buf(map->map, strlen(map->map), "eval-import-js", JS_EVAL_TYPE_GLOBAL);
if (!JS_IsUndefined(result) && !JS_IsException(result)) {
const char *result_cstr = JS_ToCString(qjs_ctx(), result);
dmapi_param_set(node, result_cstr);
JS_FreeCString(qjs_ctx(), result_cstr);
JS_FreeValue(qjs_ctx(), result);
} else {
dmlog_error("failed to import js: %s", map->map);
}
return 0;
} else {
if (node->cnt == 0 || strchr(map->map, '.') != NULL) {
// uci option with full path
dm_uci_get(map->map, &val);
} else {
// uci option from the js object.
char *buf;
asprintf(&buf, "_arg['%s']", map->map);
JSValue result = qjs_eval_buf(buf, strlen(buf), "eval-uci-js", JS_EVAL_TYPE_GLOBAL);
free(buf);
if (!JS_IsException(result) && !JS_IsUndefined(result)) {
const char *result_cstr = JS_ToCString(qjs_ctx(), result);
val = strdup(result_cstr);
JS_FreeCString(qjs_ctx(), result_cstr);
}
JS_FreeValue(qjs_ctx(), result);
}
}
const struct dm_parameter * param = dm_node_get_parameter(node->id);
if (val == NULL) {
// uci option not present, use the default value defined in JSON if it exists.
if (param->default_uci_val) {
dmapi_param_set(node, param->default_uci_val);
}
return 0;
}
if (map->type == DM_UCI_MAP_TYPE_DISABLE) {
if (*val == '0') {
dmapi_param_set(node, "true");
} else {
dmapi_param_set(node, "false");
}
} else if (param->data_type == DM_DATA_BOOLEAN) {
if (*val == '1' || !strcmp(val, "true") || !strcmp(val, "yes")) {
dmapi_param_set(node, "true");
} else {
dmapi_param_set(node, "false");
}
} else if (map->type == DM_UCI_MAP_TYPE_INTERFACE) {
char *pn = get_tr181_pathname("Device.IP.Interface.", "network.interface", val);
if (pn) {
dmapi_param_set(node, pn);
free(pn);
} else {
dmlog_error("import_pathname for network interface failed %s, %s", val, dm_node_str(node));
}
} else {
if (dm_node_data_type(node->id) == DM_PATH_NAME) {
import_pathname(node, val);
} else {
if (param->data_type == DM_DATA_ENUM && param->data.enum_strings) {
// convert the uci case to the tr181 case, ex 'udp' => 'UDP', 'up' => 'Up'
int i = 0;
while (param->data.enum_strings[i]) {
if (strcasecmp(param->data.enum_strings[i], val) == 0) {
dmapi_param_set(node, param->data.enum_strings[i]);
break;
}
i++;
}
if (!param->data.enum_strings[i]) {
dmlog_error("uci value %s not found in the enum, %s", val, dm_node_str(node));
}
} else {
dmapi_param_set(node, val);
}
}
}
free(val);
return 0;
}
static int import_dm_obj(const dm_node_t *node, JSValue js_values);
static int handle_obj_uci_map(const dm_node_t *node, const struct dm_uci_map *map)
{
int ret;
char *path = strdup(map->map);
char *config = strtok(path, ".");
char *type = strtok(NULL, ".");
json_object *values_obj;
if (map->key != NULL && dm_node_index_cnt(node->id) > 1) {
dm_node_t parent;
dm_node_i_parent(node, &parent);
const struct dm_object *obj = dm_node_get_object(parent.id);
if (obj->map.map == NULL || obj->map.type != DM_UCI_MAP_TYPE_SIMPLE) {
dmlog_error("handle_obj_uci_map missing parent map uci: %s", dm_node_str(node));
free(path);
return -1;
}
char *key_val = NULL;
if (dbmgr_get_child(&parent, "_key", &key_val) < 0 || key_val == NULL) {
dmlog_error("handle_obj_uci_map dbmgr_get_child failed: %s", dm_node_str(node));
free(path);
return -1;
}
name_val_t match = {map->key, key_val};
ret = dm_uci_get_section_list(config, type, &match, 1, &values_obj);
free(key_val);
} else {
ret = dm_uci_get_section_list(config, type, NULL, 0, &values_obj);
}
free(path);
if (ret != 0 || values_obj == NULL) {
dmlog_warn("dm_uci_get_section_list failed, uci: %s", map->map);
// uci could be missing, skip the error
return 0;
}
int obj_index = 0;
json_object_object_foreach(values_obj, key, val) {
(void)key;
if (!qjs_uci_filter(node, val)) {
continue;
}
dm_node_t new_inst = *node;
if (dmapi_object_add(&new_inst) < 0) {
dmlog_error("failed to add new instance");
ret = -1;
break;
}
json_object *name_obj;
if (json_object_object_get_ex(val, ".name", &name_obj)) {
dm_node_t key_node = new_inst;
key_node.id = dm_node_get_child_id(new_inst.id, "_key");
if (dmapi_param_set(&key_node, json_object_get_string(name_obj)) < 0) {
dmlog_error("failed to set param value");
ret = -1;
}
} else {
dmlog_error("failed to get uci name");
ret = -1;
break;
}
JSValue js_val = json_object_to_jsvalue(val);
JS_SetPropertyStr(qjs_ctx(), qjs_global(), "_arg", js_val);
JS_SetPropertyStr(qjs_ctx(), js_val, ".index", JS_NewInt32(qjs_ctx(), obj_index));
obj_index++;
ret = import_dm_obj(&new_inst, js_val);
if (ret < 0) {
dmlog_error("failed to import node %s", dm_node_str(&new_inst));
// break;
}
}
json_object_put(values_obj);
return 0;
}
extern int update_dm_value(JSContext *ctx, const char *key, JSValue value);
static int import_dm_obj(const dm_node_t *node, JSValue js_values) {
if (qjs_has_import_handler(node->id)) {
int ret = 0;
dm_path_t node_path;
dm_node2name(node, node_path, sizeof(dm_path_t));
if (dm_node_is_object(node->id)) {
strcat(node_path, ".");
}
JSValue input_val = js_values;
JSValue js_val = qjs_call_import_handler(node, input_val);
if (!JS_IsException(js_val) && !JS_IsUndefined(js_val)) {
int len = strlen(node_path);
if (node_path[len-2] == '}') {
node_path[len-4] = '\0';
}
const char *imported_val = get_js_value_str(js_val);
free_js_cstr(imported_val);
ret = update_dm_value(qjs_ctx(), node_path, js_val);
if (ret < 0) {
dmlog_error("update_dm_value failed");
}
}
JS_FreeValue(qjs_ctx(), js_val);
return ret;
}
if (dm_node_is_parameter(node->id)) {
if (dm_node_is_confidential(node->id) || !dm_node_has_db(node->id)) {
return 0;
}
const struct dm_parameter * param = dm_node_get_parameter(node->id);
if (param->map.map == NULL) {
return 0;
}
return importParam(node, &param->map);
}
if (dm_node_is_objectlist(node->id) && !dm_node_is_index_complete(node)) {
const struct dm_object *obj = dm_node_get_object(node->id);
if (obj->map.map != NULL && obj->map.type == DM_UCI_MAP_TYPE_SIMPLE) {
return handle_obj_uci_map(node, &obj->map);
} else {
dmlog_debug("ignored import for object: %s", dm_node_str(node));
}
return 0;
}
if (dm_node_is_object(node->id) || dm_node_is_objectlist(node->id)) {
const struct dm_object * obj = dm_node_get_object(node->id);
for (int i = 0; i < obj->param_num; i++) {
dm_node_t param_node = *node;
param_node.id = obj->param_list[i]->node_id;
if (import_dm_obj(&param_node, js_values) < 0) {
dmlog_error("import param failed: %s", dm_node_str(&param_node));
}
}
for (int i = 0; i < obj->object_num; i++) {
dm_node_t node_obj = *node;
node_obj.id = obj->object_list[i]->node_id;
if (import_dm_obj(&node_obj, js_values) < 0) {
dmlog_debug("import object failed: %s", dm_node_str(&node_obj));
}
}
}
return 0;
}
int importDM()
{
dm_node_t root_node = dm_init_node(DM_DEVICE);
return import_dm_obj(&root_node, JS_UNDEFINED);
}


@@ -1,624 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <uci.h>
#include <stdint.h>
#include "dm_api.h"
#include "dm_node.h"
#include "dm_log.h"
#include "dm.h"
#include "dbmgr.h"
#define LOCK_FILE "/var/lock/bbfdm_reference_db.lock"
#define UCI_PACKAGE "bbfdm_reference_db"
extern const dm_node_id_t dm_linker_nodes[];
// Forward declarations
static char *calculate_hash(const char *input);
static int process_linker_node(struct uci_context *ctx, struct uci_package *pkg,
const char *service_hash, const dm_node_t *node);
static int clear_service_entries(struct uci_context *ctx, struct uci_package *pkg,
const char *service_hash);
static int ensure_uci_sections(struct uci_context *ctx, struct uci_package *pkg);
// FNV-1 32-bit parameters
#define FNV_OFFSET_BASIS 0x811C9DC5U
#define FNV_PRIME 0x1000193U
/*
* Calculate 8-character uppercase hex hash using the FNV-1 algorithm.
* Returns malloc-ed string (must be freed by caller) or NULL on error.
*/
static char *calculate_hash(const char *input) {
if (!input) {
return NULL;
}
uint32_t hash = FNV_OFFSET_BASIS;
const unsigned char *ptr = (const unsigned char *)input;
while (*ptr) {
hash *= FNV_PRIME;
hash ^= (uint32_t)(*ptr);
ptr++;
}
char *result = malloc(9); // 8 chars + null terminator
if (!result) {
return NULL;
}
snprintf(result, 9, "%08X", hash);
return result;
}
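/* Worked example: calculate_hash("") returns "811C9DC5" (the offset basis, since
* no bytes are folded in), and every further byte updates the hash as
* hash = (hash * FNV_PRIME) ^ byte before it is formatted with "%08X". */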
/**
* Check if a node ID is in the linker nodes array
*/
int is_linker_node(dm_node_id_t node_id) {
for (int i = 0; dm_linker_nodes[i] != INVALID_DM_NODE_ID; i++) {
if (dm_linker_nodes[i] == node_id) {
return 1;
}
}
return 0;
}
int object_has_linker_param(dm_node_id_t object_id) {
const struct dm_object *obj = dm_node_get_object(object_id);
if (!obj) {
return 0;
}
for (int i = 0; i < obj->param_num; i++) {
const struct dm_node_info *info = obj->param_list[i];
if (is_linker_node(info->node_id)) {
return 1;
}
}
return 0;
}
/**
* Process a single linker parameter node
*/
static int process_linker_node(struct uci_context *ctx, struct uci_package *pkg,
const char *service_hash, const dm_node_t *node) {
const struct dm_parameter *param = dm_node_get_parameter(node->id);
if (!param) return -1;
// Check if this parameter is a linker node
if (!is_linker_node(node->id)) {
dmlog_error("Not a linker parameter %s", dm_node_str(node));
return 0; // Not a linker parameter
}
// Get the parent object path
dm_node_t parent_node;
if (dm_node_i_parent(node, &parent_node) != 0) {
dmlog_error("Failed to get parent node for %s", dm_node_str(node));
return -1;
}
// Get parent path
dm_path_t parent_path;
if (dm_node2name(&parent_node, parent_path, sizeof(parent_path)) != 0) {
dmlog_error("Failed to get parent path for %s", dm_node_str(&parent_node));
return -1;
}
// Get current path (same as parent for leaf parameters)
dm_path_t current_path;
if (dm_node2name(&parent_node, current_path, sizeof(current_path)) != 0) {
dmlog_error("Failed to get current path for %s", dm_node_str(&parent_node));
return -1;
}
// Get parameter value
char *key_value = NULL;
if (dbmgr_get(node, &key_value) != 0 || !key_value) {
dmlog_debug("No value found for linker parameter %s", dm_node_str(node));
return 0;
}
// Compose linker string directly from parent_path, keeping instance numbers intact
const char *key_name = param->node.name;
size_t len_linker = strlen(parent_path) + strlen(key_name) + strlen(key_value) + 6; // extra for [==].
char *linker_string = malloc(len_linker);
if (!linker_string) {
dmlog_error("Failed to allocate memory for linker string");
free(key_value);
return -1;
}
dm_node_t parent_node_with_index = parent_node;
parent_node_with_index.cnt = 0;
dm_path_t parent_path_with_index;
if (dm_node2name_with_index(&parent_node_with_index, parent_path_with_index, sizeof(parent_path_with_index), "*") != 0) {
dmlog_error("Failed to get parent path for %s", dm_node_str(&parent_node));
free(linker_string);
free(key_value);
return -1;
}
parent_path_with_index[strlen(parent_path_with_index) - 2] = '\0';
snprintf(linker_string, len_linker, "%s[%s==%s].", parent_path_with_index, key_name, key_value);
// Calculate hashes
char *hash_path = calculate_hash(linker_string);
char *hash_value = calculate_hash(current_path);
if (!hash_path || !hash_value) {
dmlog_error("Failed to calculate hashes for %s", linker_string);
free(linker_string);
free(key_value);
free(hash_path);
free(hash_value);
return -1;
}
// Update UCI sections
struct uci_section *ref_path_section = uci_lookup_section(ctx, pkg, "reference_path");
struct uci_section *ref_value_section = uci_lookup_section(ctx, pkg, "reference_value");
struct uci_section *service_section = uci_lookup_section(ctx, pkg, service_hash);
if (!ref_path_section || !ref_value_section || !service_section) {
dmlog_error("Failed to find required UCI sections");
free(linker_string);
free(key_value);
free(hash_path);
free(hash_value);
return -1;
}
// Set reference_path option
struct uci_ptr ptr;
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = "reference_path";
ptr.option = hash_path;
ptr.value = current_path;
uci_set(ctx, &ptr);
// Set reference_value option
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = "reference_value";
ptr.option = hash_value;
ptr.value = (strlen(key_value) > 0) ? key_value : "#";
uci_set(ctx, &ptr);
// Add to service section lists
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = service_hash;
ptr.option = "reference_path";
ptr.value = hash_path;
uci_add_list(ctx, &ptr);
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = service_hash;
ptr.option = "reference_value";
ptr.value = hash_value;
uci_add_list(ctx, &ptr);
// dmlog_debug("Processed linker: %s -> %s (path_hash=%s, value_hash=%s)",
// linker_string, current_path, hash_path, hash_value);
// Cleanup
free(linker_string);
free(key_value);
free(hash_path);
free(hash_value);
return 0;
}
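/*
* Illustrative layout of the resulting /var/state/bbfdm_reference_db package
* (the hash names, paths and values below are made up for the example):
*
*   config reference_path 'reference_path'
*       option 1A2B3C4D 'Device.IP.Interface.1'
*
*   config reference_value 'reference_value'
*       option 5E6F7A8B 'wan'
*
*   config service 'D41D8C11'
*       list reference_path '1A2B3C4D'
*       list reference_value '5E6F7A8B'
*/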
/**
* Process all linker nodes by iterating through the linker nodes array
*/
static int process_all_linker_nodes(struct uci_context *ctx, struct uci_package *pkg,
const char *service_hash) {
// Iterate through all linker nodes
for (int i = 0; dm_linker_nodes[i] != INVALID_DM_NODE_ID; i++) {
dm_node_id_t linker_id = dm_linker_nodes[i];
// Check if this is a parameter node
if (!dm_node_is_parameter(linker_id)) {
continue;
}
// Get all instances of multi-instance objects that contain this linker parameter
const struct dm_parameter *param = dm_node_get_parameter(linker_id);
if (!param || !param->node.parent) {
continue;
}
// Find the object that contains this parameter
dm_node_id_t parent_id = param->node.parent->node_id;
// Only handle multi-instance objects; skip single-instance objects
if (dm_node_is_objectlist(parent_id)) {
dm_node_t parent_node = {0};
parent_node.id = parent_id;
// For multi-index nodes, bypass inode buffer and go directly to DB
dm_nodelist_h list = dm_nodelist_find(&parent_node, NULL, 1);
if (list != DM_INVALID_NODELIST) {
const dm_node_t *instance_node;
nodelist_for_each_node(instance_node, list) {
// dmlog_debug("processing linker node %s", dm_node_str(instance_node));
// Create the parameter node for this instance
dm_node_t param_node = *instance_node;
param_node.id = linker_id;
// Process this linker parameter instance
process_linker_node(ctx, pkg, service_hash, &param_node);
}
dm_nodelist_free(list);
}
}
}
return 0;
}
/**
* Clear existing entries for this service
*/
static int clear_service_entries(struct uci_context *ctx, struct uci_package *pkg,
const char *service_hash) {
struct uci_section *service_section = uci_lookup_section(ctx, pkg, service_hash);
if (!service_section) {
return 0; // Section doesn't exist yet
}
// Clear the lists
struct uci_ptr ptr;
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = service_hash;
ptr.option = "reference_path";
uci_delete(ctx, &ptr);
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = service_hash;
ptr.option = "reference_value";
uci_delete(ctx, &ptr);
return 0;
}
/**
* Ensure required UCI sections exist
*/
static int ensure_uci_sections(struct uci_context *ctx, struct uci_package *pkg) {
struct uci_ptr ptr;
// Ensure reference_path section exists
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = "reference_path";
ptr.value = "reference_path";
if (uci_set(ctx, &ptr) != UCI_OK) {
return -1;
}
// Ensure reference_value section exists
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = "reference_value";
ptr.value = "reference_value";
if (uci_set(ctx, &ptr) != UCI_OK) {
return -1;
}
return 0;
}
/**
* Main API function to refresh linker nodes
*/
int dm_refresh_linker_nodes(const char *service_name) {
dmlog_debug("dm_refresh_linker_nodes start");
if (!service_name) {
dmlog_error("Service name is required");
return -1;
}
// Step 1: Open and lock the reference database
int lock_fd = open(LOCK_FILE, O_CREAT | O_RDWR, 0644);
if (lock_fd < 0) {
dmlog_error("Failed to open lock file: %s", LOCK_FILE);
return -1;
}
if (flock(lock_fd, LOCK_EX) != 0) {
dmlog_error("Failed to acquire exclusive lock");
close(lock_fd);
return -1;
}
// Step 2: Calculate service hash
char *service_hash = calculate_hash(service_name);
if (!service_hash) {
dmlog_error("Failed to calculate service hash");
close(lock_fd);
return -1;
}
dmlog_debug("service_hash: %s", service_hash);
// Step 3: Initialize UCI context and point to /var/state for runtime reference DB
struct uci_context *ctx = uci_alloc_context();
if (!ctx) {
dmlog_error("Failed to allocate UCI context");
free(service_hash);
close(lock_fd);
return -1;
}
// Use /var/state as configuration directory
uci_set_confdir(ctx, "/var/state");
// Load or create the UCI package
struct uci_package *pkg = NULL;
if (uci_load(ctx, UCI_PACKAGE, &pkg) != UCI_OK) {
// Package doesn't exist, create it
struct uci_ptr ptr;
memset(&ptr, 0, sizeof(ptr));
ptr.package = UCI_PACKAGE;
if (uci_set(ctx, &ptr) != UCI_OK) {
dmlog_error("Failed to create UCI package: %s", UCI_PACKAGE);
uci_free_context(ctx);
free(service_hash);
close(lock_fd);
return -1;
}
pkg = ptr.p;
}
// Step 4: Ensure required sections exist
if (ensure_uci_sections(ctx, pkg) != 0) {
dmlog_error("Failed to ensure UCI sections");
uci_free_context(ctx);
free(service_hash);
close(lock_fd);
return -1;
}
// Create service section
struct uci_ptr ptr;
memset(&ptr, 0, sizeof(ptr));
ptr.package = pkg->e.name;
ptr.section = service_hash;
ptr.value = "service";
uci_set(ctx, &ptr);
// Step 5: Clear existing entries for this service
clear_service_entries(ctx, pkg, service_hash);
// Step 6: Process all linker nodes
if (process_all_linker_nodes(ctx, pkg, service_hash) != 0) {
dmlog_error("Failed to process linker nodes");
uci_free_context(ctx);
free(service_hash);
close(lock_fd);
return -1;
}
// Step 7: Commit changes
if (uci_commit(ctx, &pkg, false) != UCI_OK) {
dmlog_error("Failed to commit UCI changes");
uci_free_context(ctx);
free(service_hash);
close(lock_fd);
return -1;
}
// Cleanup
uci_free_context(ctx);
free(service_hash);
// Step 8: Release lock
flock(lock_fd, LOCK_UN);
close(lock_fd);
dmlog_info("Successfully refreshed linker nodes for service: %s", service_name);
return 0;
}
/*
* Resolve the linker parameter value for a given object instance path.
* The object_path should include the instance number, for example
* "Device.IP.Interface.1"
* On success, *value will point to a malloc()'d string that must be freed
* by the caller. The function returns 0 on success or -1 on error.
*/
int dm_resolve_linker(const char *object_path, char **value) {
if (!object_path || !value) {
dmlog_error("Invalid arguments to dm_resolve_linker");
return -1;
}
*value = NULL;
/* Calculate the hash used as option name in the reference_value section */
char *hash_str = calculate_hash(object_path);
if (!hash_str) {
dmlog_error("Failed to calculate hash for %s", object_path);
return -1;
}
struct uci_context *ctx = uci_alloc_context();
if (!ctx) {
dmlog_error("Failed to allocate UCI context");
free(hash_str);
return -1;
}
/* reference DB lives in /var/state */
uci_set_confdir(ctx, "/var/state");
struct uci_package *pkg = NULL;
if (uci_load(ctx, UCI_PACKAGE, &pkg) != UCI_OK || !pkg) {
dmlog_error("Failed to load UCI package %s", UCI_PACKAGE);
uci_free_context(ctx);
free(hash_str);
return -1;
}
struct uci_section *ref_value_section = uci_lookup_section(ctx, pkg, "reference_value");
if (!ref_value_section) {
dmlog_error("reference_value section not found in %s", UCI_PACKAGE);
uci_free_context(ctx);
free(hash_str);
return -1;
}
const char *str_val = uci_lookup_option_string(ctx, ref_value_section, hash_str);
if (!str_val) {
dmlog_debug("No linker value found for %s (hash=%s)", object_path, hash_str);
uci_free_context(ctx);
free(hash_str);
return -1;
}
/* Allocate and copy result for caller */
if (strcmp(str_val, "#") == 0) {
/* '#' is used to represent an empty string */
*value = strdup("");
} else {
*value = strdup(str_val);
}
if (!*value) {
uci_free_context(ctx);
free(hash_str);
return -1;
}
uci_free_context(ctx);
free(hash_str);
return 0;
}
/**
* Resolve the object path for a given linker value.
*
* This helper reconstructs the deterministic linker string of the form
* "<base_path>[<key_name>==<key_value>]."
* that was generated by dm_refresh_linker_nodes() and uses its hash to
* look up the real object path from the reference database.
*
* If no matching entry exists in the database, *object_path is left
* NULL and -1 is returned (unlike the legacy _bbfdm_get_references(),
* which fell back to the linker string itself).
*
* The caller is responsible for freeing the returned string.
*
* @param base_path Base object path ending with a dot (e.g. "Device.WiFi.SSID.")
* @param key_name Parameter name that acts as linker key (e.g. "Name")
* @param key_value Desired value of the linker key
* @param object_path [out] malloc-allocated string holding the resolved
* object path (e.g. "Device.WiFi.SSID.4"),
* or NULL when no match is found
* @return 0 when resolved, -1 if no match/error
*/
int dm_resolve_linker_path(const char *base_path,
const char *key_name,
const char *key_value,
char **object_path) {
if (!base_path || !key_name || !key_value || !object_path) {
dmlog_error("Invalid arguments to dm_resolve_linker_path");
return -1;
}
if (strlen(base_path) == 0 || strlen(key_name) == 0 || strlen(key_value) == 0) {
dmlog_error("base_path, key_name and key_value must not be empty");
return -1;
}
*object_path = NULL;
/* Compose the linker string that was used as basis for the reference hash */
size_t linker_len = strlen(base_path) + strlen(key_name) + strlen(key_value) + 6; /* "[==]." */
char *linker_string = malloc(linker_len);
if (!linker_string) {
return -1;
}
snprintf(linker_string, linker_len, "%s[%s==%s].", base_path, key_name, key_value);
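/* e.g. base_path "Device.WiFi.SSID.", key_name "Name", key_value "wifi0"
* yields linker_string "Device.WiFi.SSID.[Name==wifi0]." (illustrative values) */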
/* Calculate hash for use as option name inside reference_path section */
char *hash_path = calculate_hash(linker_string);
if (!hash_path) {
free(linker_string);
return -1;
}
/* Open UCI context pointing to runtime reference DB (/var/state) */
struct uci_context *ctx = uci_alloc_context();
if (!ctx) {
dmlog_error("Failed to allocate UCI context");
free(linker_string);
free(hash_path);
return -1;
}
uci_set_confdir(ctx, "/var/state");
struct uci_package *pkg = NULL;
if (uci_load(ctx, UCI_PACKAGE, &pkg) != UCI_OK || !pkg) {
dmlog_error("Failed to load UCI package %s", UCI_PACKAGE);
uci_free_context(ctx);
free(linker_string);
free(hash_path);
return -1;
}
struct uci_section *ref_path_section = uci_lookup_section(ctx, pkg, "reference_path");
if (!ref_path_section) {
dmlog_error("reference_path section not found in %s", UCI_PACKAGE);
uci_free_context(ctx);
free(linker_string);
free(hash_path);
return -1;
}
const char *uci_path = uci_lookup_option_string(ctx, ref_path_section, hash_path);
int ret = -1; /* default: not resolved */
if (uci_path) {
/* Matching object path found */
*object_path = strdup(uci_path);
if (*object_path)
ret = 0;
} else {
/* No match. Leave *object_path NULL and return error */
ret = -1;
}
uci_free_context(ctx);
free(linker_string);
free(hash_path);
return ret;
}


@@ -1,89 +0,0 @@
/*
* Copyright (c) 2023 Genexis B.V. All rights reserved.
*
* This Software and its content are protected by the Dutch Copyright Act
* ('Auteurswet'). All and any copying and distribution of the software
* and its content without authorization by Genexis B.V. is
* prohibited. The prohibition includes every form of reproduction and
* distribution.
*
*/
#ifndef DM_LINKER_H
#define DM_LINKER_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* Refresh linker nodes for a given service
*
* This function walks through the entire data model tree and updates the
* UCI reference database with linker parameter information for the specified
* service. It creates a deterministic mapping between object paths and their
* linker parameter values.
*
* The function:
* 1. Takes an exclusive lock on the reference database
* 2. Calculates a service hash from the service name
* 3. Prepares the UCI package with required sections
* 4. Walks through all multi-instance objects looking for linker parameters
* 5. Builds linker strings and generates hashes
* 6. Updates the UCI reference database
* 7. Commits changes and releases the lock
*
* @param service_name The UBus object name of the service (e.g., "bbfdm.core")
* @return 0 on success, -1 on error
*/
int dm_refresh_linker_nodes(const char *service_name);
/**
* Check if a node ID is a linker node
*
* This function checks if the given node ID is in the linker nodes array.
*
* @param node_id The node ID to check
* @return 1 if the node is a linker node, 0 otherwise
*/
int is_linker_node(dm_node_id_t node_id);
/**
* Check if an object has a linker parameter
*
* This function checks if the given object has any parameters that are linker nodes.
*
* @param object_id The object ID to check
* @return 1 if the object has a linker parameter, 0 otherwise
*/
int object_has_linker_param(dm_node_id_t object_id);
/**
* Resolve the linker parameter value for a given object instance path
*
* This helper looks up the reference database that was populated by
* dm_refresh_linker_nodes() and returns the value of the linker
* parameter belonging to the supplied object path.
*
* The caller is responsible for freeing the returned string.
*
* @param object_path Full object path including instance number
* (e.g. "Device.IP.Interface.1")
* @param value[out] Pointer that will receive the malloc-allocated value
* @return 0 on success, -1 on failure
*/
int dm_resolve_linker(const char *object_path, char **value);
/*
* Resolve object path from a linker value.
* Returns 0 and sets *object_path on success.
* Returns -1 and leaves *object_path NULL if no matching object path exists
* or on error.
*/
int dm_resolve_linker_path(const char *base_path, const char *key_name, const char *key_value, char **object_path);
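/* Usage sketch (illustrative only: the service name matches the example above,
* while the paths, key name and values are made-up examples):
*
*   dm_refresh_linker_nodes("bbfdm.core");
*
*   char *val = NULL;
*   if (dm_resolve_linker("Device.IP.Interface.1", &val) == 0) {
*       // val holds the linker key value, e.g. "wan"
*       free(val);
*   }
*
*   char *path = NULL;
*   if (dm_resolve_linker_path("Device.IP.Interface.", "Name", "wan", &path) == 0) {
*       // path holds the resolved object path, e.g. "Device.IP.Interface.1"
*       free(path);
*   }
*/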
#ifdef __cplusplus
}
#endif
#endif /* DM_LINKER_H */

Some files were not shown because too many files have changed in this diff.