From c83ebf133decf278236c00d6d4ed689fb2960781 Mon Sep 17 00:00:00 2001 From: Andrew Grasso Date: Fri, 17 Nov 2023 19:43:53 +0000 Subject: [PATCH 001/176] Fix update to the Debian sudoers file --- debian/packetfence.sudoers | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/packetfence.sudoers b/debian/packetfence.sudoers index e7ad7ed94bbe..d6cd4d7da1a6 100644 --- a/debian/packetfence.sudoers +++ b/debian/packetfence.sudoers @@ -1,5 +1,5 @@ pf ALL=NOPASSWD: /bin/systemctl, /sbin/iptables, /usr/sbin/ip6tables, /usr/sbin/ipset, /sbin/ipset, /sbin/ip, /sbin/vconfig, /sbin/route, /usr/bin/systemctl, /usr/bin/tee, /usr/local/pf/sbin/pfdhcplistener, /bin/kill, /usr/sbin/freeradius, /usr/sbin/chroot, /usr/local/pf/bin/pfcmd, /usr/sbin/conntrack, /usr/bin/timedatectl, /usr/bin/hostnamectl -Defaults env_check="PF_UID PF_GID", !requiretty +Defaults env_check+="PF_UID PF_GID", !requiretty # Do not log commands that starts by '/sbin/ip netns exec' # i.e. net ads join, testjoin and leave Cmnd_Alias NETADS = /sbin/ip netns exec * From b2b03db90e9af0b6d416abe48b88292bd75ab067 Mon Sep 17 00:00:00 2001 From: snyk-bot Date: Sat, 7 Sep 2024 09:28:41 +0000 Subject: [PATCH 002/176] fix: containers/pfsetacls/Dockerfile to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-DEBIAN12-EXPAT-7855502 - https://snyk.io/vuln/SNYK-DEBIAN12-EXPAT-7855503 - https://snyk.io/vuln/SNYK-DEBIAN12-EXPAT-7855507 - https://snyk.io/vuln/SNYK-DEBIAN12-CURL-7575306 - https://snyk.io/vuln/SNYK-DEBIAN12-CURL-7575306 --- containers/pfsetacls/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/pfsetacls/Dockerfile b/containers/pfsetacls/Dockerfile index c4a70600dfd8..49a536bd0bd1 100644 --- a/containers/pfsetacls/Dockerfile +++ b/containers/pfsetacls/Dockerfile @@ -1,5 +1,5 @@ -FROM golang:1.22.5-bookworm +FROM golang:1.23.0-bookworm ENV SEMAPHORE_VERSION="development" SEMAPHORE_ARCH="linux_amd64" \ 
SEMAPHORE_CONFIG_PATH="${SEMAPHORE_CONFIG_PATH:-/etc/semaphore}" \ From 7f2276a3bf44236ede33ae83d32349779c7ab842 Mon Sep 17 00:00:00 2001 From: Durand Date: Tue, 10 Sep 2024 16:25:09 -0400 Subject: [PATCH 003/176] Use new FreeRADIUS 3.2.6 version (#8290) --- bin/pyntlm_auth/handlers.py | 29 +++++++++++++++++------------ bin/pyntlm_auth/rpc.py | 28 ++++++++++++++++------------ 2 files changed, 33 insertions(+), 24 deletions(-) diff --git a/bin/pyntlm_auth/handlers.py b/bin/pyntlm_auth/handlers.py index 7eb35c8b116b..655763ef4e27 100644 --- a/bin/pyntlm_auth/handlers.py +++ b/bin/pyntlm_auth/handlers.py @@ -1,17 +1,17 @@ -from http import HTTPStatus -from flask import Flask, request, g -import ncache -import re -import hashlib import binascii -import json -import utils -import ms_event +import hashlib +import re +from http import HTTPStatus + +from flask import request, g +from samba import ntstatus + +import flags import global_vars +import ms_event +import ncache import rpc -import flags -from samba import param, NTSTATUSError, ntstatus # For NTSTATUS, see: # https://github.com/samba-team/samba/blob/master/libcli/util/ntstatus_err_table.txt @@ -89,6 +89,7 @@ def test_password_handler(): def ntlm_auth_handler(): try: required_keys = {'username', 'mac', 'request-nt-key', 'challenge', 'nt-response'} + optional_keys = {'domain'} data = request.get_json() if data is None: @@ -102,6 +103,11 @@ def ntlm_auth_handler(): challenge = data['challenge'] nt_response = data['nt-response'] + if 'domain' in data: + domain = data['domain'] + else: + domain = global_vars.c_domain + except Exception as e: return f"Error processing JSON payload, {str(e)}", HTTPStatus.UNPROCESSABLE_ENTITY @@ -114,7 +120,7 @@ def ntlm_auth_handler(): domain = global_vars.c_domain_identifier nt_key, error_code, info = ncache.cached_login(domain, account_username, mac, challenge, nt_response, ) else: - nt_key, error_code, info = rpc.transitive_login(account_username, challenge, nt_response) + nt_key, 
error_code, info = rpc.transitive_login(account_username, challenge, nt_response, domain=domain) return format_response(nt_key, error_code) @@ -162,4 +168,3 @@ def ntlm_expire_handler(): return "OK", HTTPStatus.OK except Exception as e: return f"Error processing JSON payload, {str(e)}", HTTPStatus.UNPROCESSABLE_ENTITY - diff --git a/bin/pyntlm_auth/rpc.py b/bin/pyntlm_auth/rpc.py index 04b7096ebff6..4bf888ef0fcc 100644 --- a/bin/pyntlm_auth/rpc.py +++ b/bin/pyntlm_auth/rpc.py @@ -1,14 +1,16 @@ -import global_vars -import config_generator -from samba import param, NTSTATUSError, ntstatus +import binascii +import datetime +import random + +from samba import param, NTSTATUSError from samba.credentials import Credentials, DONT_USE_KERBEROS -from samba.dcerpc.misc import SEC_CHAN_WKSTA from samba.dcerpc import netlogon -import utils -import datetime +from samba.dcerpc.misc import SEC_CHAN_WKSTA from samba.dcerpc.netlogon import (netr_Authenticator, MSV1_0_ALLOW_WORKSTATION_TRUST_ACCOUNT, MSV1_0_ALLOW_MSVCHAPV2) -import binascii -import random + +import config_generator +import global_vars +import utils def init_secure_connection(): @@ -20,11 +22,11 @@ def init_secure_connection(): password = global_vars.c_password domain = global_vars.c_domain username = global_vars.c_username - server_name = global_vars.c_server_name # FQDN of Domain Controller + server_name = global_vars.c_server_name # FQDN of Domain Controller domain_controller_records = utils.find_ldap_servers(global_vars.c_realm, global_vars.c_dns_servers) if len(domain_controller_records) > 0: - idx = random.randint(0, len(domain_controller_records) -1) + idx = random.randint(0, len(domain_controller_records) - 1) record = domain_controller_records[idx] server_name = record.get('target') @@ -95,9 +97,11 @@ def get_secure_channel_connection(): return global_vars.s_secure_channel_connection, global_vars.s_machine_cred, global_vars.s_connection_id, 0, "" -def transitive_login(account_username, challenge, 
nt_response): +def transitive_login(account_username, challenge, nt_response, domain=None): + if domain is None: + domain = global_vars.c_domain + server_name = global_vars.c_server_name - domain = global_vars.c_domain workstation = global_vars.c_workstation global_vars.s_secure_channel_connection, global_vars.s_machine_cred, global_vars.s_connection_id, error_code, error_message = get_secure_channel_connection() if error_code != 0: From 1d3e1bb2b8430695c8a08f691986bc1a798441b7 Mon Sep 17 00:00:00 2001 From: Zhihao Ma Date: Mon, 16 Sep 2024 10:41:09 -0400 Subject: [PATCH 004/176] update ntlm_auth_wrapper to accept --domain parameter as well --- src/ntlm_auth_wrap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ntlm_auth_wrap.c b/src/ntlm_auth_wrap.c index d1d866365dd1..a7832594e779 100644 --- a/src/ntlm_auth_wrap.c +++ b/src/ntlm_auth_wrap.c @@ -360,6 +360,8 @@ char **argv, **envp; cJSON_AddStringToObject(json, "nt-response", argv[i] + strlen("--nt-response=")); } else if (strncmp(argv[i], "--mac=", strlen("--mac=")) == 0) { cJSON_AddStringToObject(json, "mac", argv[i] + strlen("--mac=")); + } else if (strncmp(argv[i], "--domain=", strlen("--domain=")) == 0) { + cJSON_AddStringToObject(json, "domain", argv[i] + strlen("--domain=")); } } From 4849685b5cbb2615a8e4bc2437c262a095a22c12 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:11:36 -0400 Subject: [PATCH 005/176] Fix Mariadb version to 10.11.6 --- rpm/packetfence.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpm/packetfence.spec b/rpm/packetfence.spec index 8275f36c72e9..1a86fcdab9e5 100644 --- a/rpm/packetfence.spec +++ b/rpm/packetfence.spec @@ -61,8 +61,8 @@ Requires: net-snmp >= 5.3.2.2 Requires: net-snmp-perl Requires: perl >= %{perl_version} Requires: packetfence-perl >= 1.2.4 -Requires: MariaDB-server >= 10.5.15, MariaDB-server < 10.6.0 -Requires: MariaDB-client >= 10.5.15, MariaDB-client < 10.6.0 +Requires: 
MariaDB-server = 10.11.6 +Requires: MariaDB-client = 10.11.6 Requires: perl(DBD::mysql) Requires: perl(Sub::Exporter) Requires: perl(Cisco::AccessList::Parser) From 1e63c96774a68c90d7d71897042404e5782c7229 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Fri, 20 Sep 2024 15:28:42 -0400 Subject: [PATCH 006/176] Added an action to update CIDR networks based on event handler --- .../root/src/views/Configuration/eventHandlers/config.js | 8 ++++++++ lib/pf/api.pm | 9 ++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/html/pfappserver/root/src/views/Configuration/eventHandlers/config.js b/html/pfappserver/root/src/views/Configuration/eventHandlers/config.js index 98e4bf9cdd9a..9da6d575b253 100644 --- a/html/pfappserver/root/src/views/Configuration/eventHandlers/config.js +++ b/html/pfappserver/root/src/views/Configuration/eventHandlers/config.js @@ -168,6 +168,14 @@ export const regexRuleActions = { siblings: { api_parameters: { default: 'mac, $mac' } } + }, + update_switch_role_network: { + value: 'update_switch_role_network', + text: i18n.t('Update Switch Role CIDR'), + types: [pfFieldType.SUBSTRING], + siblings: { + api_parameters: { default: 'mac, $mac, ip, $ip' } + } } } diff --git a/lib/pf/api.pm b/lib/pf/api.pm index f1f86167e351..234661a8eab8 100644 --- a/lib/pf/api.pm +++ b/lib/pf/api.pm @@ -1958,16 +1958,19 @@ sub push_acls : Public { =head2 update_switch_role_network -Update switch role network based on provided IP addresses and MAC addresses role and netmask +Update switch role network based on provided IP addresses and MAC addresses =cut -sub update_switch_role_network : Public :AllowedAsAction(mac, $mac, ip, $ip, mask, $mask, lease_length, $lease_length) { +sub update_switch_role_network : Public :AllowedAsAction(mac, $mac, ip, $ip) { my ($class, %postdata) = @_; - my @require = qw(mac ip mask lease_length); + my @require = qw(mac ip); my @found = grep {exists $postdata{$_}} @require; return unless pf::util::validate_argv(\@require, 
\@found); + $postdata{"mac"} = $postdata{"mask"} // undef; + $postdata{"lease_length"} = $postdata{"lease_length"} // undef; + my $cidr_role = pf::cidr_role->new(); $cidr_role->update(%postdata); From 7b7dd89d35be839011ca938efb98938635ec0c8d Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Mon, 23 Sep 2024 09:08:24 -0400 Subject: [PATCH 007/176] Fix typo --- lib/pf/api.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pf/api.pm b/lib/pf/api.pm index 234661a8eab8..a18f96466f8e 100644 --- a/lib/pf/api.pm +++ b/lib/pf/api.pm @@ -1968,7 +1968,7 @@ sub update_switch_role_network : Public :AllowedAsAction(mac, $mac, ip, $ip) { my @found = grep {exists $postdata{$_}} @require; return unless pf::util::validate_argv(\@require, \@found); - $postdata{"mac"} = $postdata{"mask"} // undef; + $postdata{"mask"} = $postdata{"mask"} // undef; $postdata{"lease_length"} = $postdata{"lease_length"} // undef; my $cidr_role = pf::cidr_role->new(); From 998dc11c678105ad66a44592bc91336acd693697 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Mon, 23 Sep 2024 15:52:49 +0000 Subject: [PATCH 008/176] Fix schema changes --- db/pf-schema-X.Y.sql | 4 ++-- db/upgrade-X.X-X.Y.sql | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/db/pf-schema-X.Y.sql b/db/pf-schema-X.Y.sql index 8192cbc46428..46ecce4a44ed 100644 --- a/db/pf-schema-X.Y.sql +++ b/db/pf-schema-X.Y.sql @@ -1455,9 +1455,9 @@ CREATE TABLE `pki_profiles` ( `revoked_valid_until` bigint(20) DEFAULT 14, `cloud_enabled` bigint(20) DEFAULT NULL, `cloud_service` longtext DEFAULT NULL, - `scep_server_enabled` bigint(20) DEFAULT 0, `scep_server_id` bigint(20) unsigned DEFAULT NULL, - `allow_duplicated_cn` bigint(20) DEFAULT 0, + `scep_server_enabled` bigint(20) DEFAULT 0, + `allow_duplicated_cn` bigint(20) unsigned DEFAULT 0, `maximum_duplicated_cn` bigint(20) DEFAULT 0, PRIMARY KEY (`id`), UNIQUE KEY `name` (`name`), diff --git a/db/upgrade-X.X-X.Y.sql b/db/upgrade-X.X-X.Y.sql index 
f86dc998644a..f9e65c5b2af8 100644 --- a/db/upgrade-X.X-X.Y.sql +++ b/db/upgrade-X.X-X.Y.sql @@ -53,8 +53,10 @@ DROP PROCEDURE IF EXISTS ValidateVersion; \! echo "altering pki_profiles" ALTER TABLE `pki_profiles` - ADD IF NOT EXISTS `allow_duplicated_cn` bigint(20) unsigned DEFAULT 0 AFTER `scep_server_id`, - ADD IF NOT EXISTS `maximum_duplicated_cn` bigint(20) DEFAULT 0; + ADD IF NOT EXISTS `allow_duplicated_cn` bigint(20) UNSIGNED DEFAULT 0, + ADD IF NOT EXISTS `maximum_duplicated_cn` bigint(20) DEFAULT 0, + MODIFY `scep_server_enabled` bigint(20) DEFAULT 0, + RENAME INDEX scep_server__id TO scep_server_id; \! echo "altering pki_certs" ALTER TABLE `pki_certs` From 59ad0177f56022237de279b0e070bcd1343f62e9 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Mon, 23 Sep 2024 12:04:33 -0400 Subject: [PATCH 009/176] Use sup or equal insted of the real value --- rpm/packetfence.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpm/packetfence.spec b/rpm/packetfence.spec index 1a86fcdab9e5..9eaa122d985b 100644 --- a/rpm/packetfence.spec +++ b/rpm/packetfence.spec @@ -61,8 +61,8 @@ Requires: net-snmp >= 5.3.2.2 Requires: net-snmp-perl Requires: perl >= %{perl_version} Requires: packetfence-perl >= 1.2.4 -Requires: MariaDB-server = 10.11.6 -Requires: MariaDB-client = 10.11.6 +Requires: MariaDB-server >= 10.11 +Requires: MariaDB-client >= 10.11 Requires: perl(DBD::mysql) Requires: perl(Sub::Exporter) Requires: perl(Cisco::AccessList::Parser) From 2435690911d0547adbee8c7a4ea02d434993c0b3 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Tue, 24 Sep 2024 11:52:50 +0000 Subject: [PATCH 010/176] Fix after merge --- html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/BarracudaNG.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/CiscoIsePic.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/ContentKeeper.pm | 2 +- 
.../lib/pfappserver/Form/Config/Firewall_SSO/FamilyZone.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/FortiGate.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/Iboss.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/JSONRPC.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/JuniperSRX.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/PaloAlto.pm | 2 +- .../lib/pfappserver/Form/Config/Firewall_SSO/WatchGuard.pm | 2 +- lib/pf/enforcement.pm | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO.pm index 6b1432e77a7c..c5d4386402ba 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO.pm @@ -155,7 +155,7 @@ has_field 'sso_on_access_reevaluation', has_block 'definition' => ( - render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head2 Methods diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/BarracudaNG.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/BarracudaNG.pm index 11553e5ffc6d..6b4092a68f60 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/BarracudaNG.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/BarracudaNG.pm @@ -42,7 +42,7 @@ has_field 'type' => has_block definition => ( - render_list => [ qw(id type username password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, 
sso_on_dhcp) ], + render_list => [ qw(id type username password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =over diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/CiscoIsePic.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/CiscoIsePic.pm index 25d428267511..f6ef14381203 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/CiscoIsePic.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/CiscoIsePic.pm @@ -29,7 +29,7 @@ has_field 'password' => has_block definition => ( - render_list => [ qw(id type port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/ContentKeeper.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/ContentKeeper.pm index b9b8314ed550..3ea55138861d 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/ContentKeeper.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/ContentKeeper.pm @@ -30,7 +30,7 @@ has_field 'type' => has_block definition => ( - render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git 
a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FamilyZone.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FamilyZone.pm index a3f78a26eb9d..bc1111215293 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FamilyZone.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FamilyZone.pm @@ -46,7 +46,7 @@ has_field 'deviceid' => has_block definition => ( - render_list => [ qw(id type deviceid password categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type deviceid password categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FortiGate.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FortiGate.pm index 27b965a6e97e..b31be8ad88eb 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FortiGate.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/FortiGate.pm @@ -30,7 +30,7 @@ has_field 'type' => has_block definition => ( - render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/Iboss.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/Iboss.pm index 016382c01edb..52af56e36080 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/Iboss.pm +++ 
b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/Iboss.pm @@ -44,7 +44,7 @@ has_field 'type' => has_block definition => ( - render_list => [ qw(id type password port nac_name categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type password port nac_name categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JSONRPC.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JSONRPC.pm index 2862f393e003..f5518105e5ad 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JSONRPC.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JSONRPC.pm @@ -44,7 +44,7 @@ has_field 'type' => has_block definition => ( - render_list => [ qw(id type username password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type username password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JuniperSRX.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JuniperSRX.pm index af35bcfc3027..b7e48fc08d00 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JuniperSRX.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/JuniperSRX.pm @@ -49,7 +49,7 @@ has_field 'port' => ); has_block definition => ( - render_list => [ qw(id type username password port categories networks cache_updates cache_timeout username_format default_realm 
act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type username password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); has_field 'type' => diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/PaloAlto.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/PaloAlto.pm index 1a181b5b811d..e535cf2600b0 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/PaloAlto.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/PaloAlto.pm @@ -59,7 +59,7 @@ has_field 'vsys' => has_block definition => ( - render_list => [ qw(id type vsys transport port password categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type vsys transport port password categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/WatchGuard.pm b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/WatchGuard.pm index 85ba68120f49..8567b7230892 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/WatchGuard.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Firewall_SSO/WatchGuard.pm @@ -30,7 +30,7 @@ has_field 'type' => has_block definition => ( - render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop, sso_on_access_reevaluation, sso_on_accounting, sso_on_dhcp) ], + render_list => [ qw(id type password port categories networks cache_updates cache_timeout username_format default_realm act_on_accounting_stop 
sso_on_access_reevaluation sso_on_accounting sso_on_dhcp) ], ); =head1 COPYRIGHT diff --git a/lib/pf/enforcement.pm b/lib/pf/enforcement.pm index 7372437d51cd..c3f09de11116 100644 --- a/lib/pf/enforcement.pm +++ b/lib/pf/enforcement.pm @@ -90,7 +90,7 @@ sub reevaluate_access { $opts{'force'} = '1' if ($function eq 'admin_modify'); my $ip = pf::ip4log::mac2ip($mac); my $sync = $opts{sync}; - if (scalar keys %ConfigFirewallSSO != 0 && (grep { $_ eq $TRUE } map { $_->{'sso_on_access_reevaluation'} } values %ConfigFirewallSSO) ) { + if ( (grep { defined $_ && $_ eq $TRUE } map { $_->{'sso_on_access_reevaluation'} } values %ConfigFirewallSSO) ) { my $node = node_attributes($mac); if ($ip) { my $firewallsso_method = ( $node->{status} eq $STATUS_REGISTERED ) ? "Update" : "Stop"; From e96f1bca7b4756ecc718e9420a1d060c008607b8 Mon Sep 17 00:00:00 2001 From: Durand Date: Tue, 24 Sep 2024 08:37:24 -0400 Subject: [PATCH 011/176] Updated pfconnector-remote install procedure (#8321) --- docs/installation/linode/linode.asciidoc | 55 +++++++++++++++++++++--- 1 file changed, 50 insertions(+), 5 deletions(-) diff --git a/docs/installation/linode/linode.asciidoc b/docs/installation/linode/linode.asciidoc index e4bbd10d102b..b00027df4016 100644 --- a/docs/installation/linode/linode.asciidoc +++ b/docs/installation/linode/linode.asciidoc @@ -94,12 +94,26 @@ Current limitations: To deploy the PacketFence Connector, first provision on your local network (where NAS devices reside) a x86_64 Debian 12 virtual machine with minimal resources (2GB of RAM, 1 CPU core and 10GB of disk space). 
Then, perform the following commands as root: +[source,bash] +---- +apt-get update +apt install gnupg sudo curl +curl -fsSL https://inverse.ca/downloads/GPG_PUBLIC_KEY | gpg --dearmor -o /etc/apt/keyrings/packetfence.gpg +---- + +Create a file named [filename]`/etc/apt/sources.list.d/packetfence-pfconnector-remote.list`: + +// subs=attributes allow to substitute {release_minor} in code block [source,bash,subs="attributes"] ---- -apt update && apt install gnupg sudo -echo 'deb http://inverse.ca/downloads/PacketFence/debian/{release_minor} bookworm bookworm' > \ +echo "deb [signed-by=/etc/apt/keyrings/packetfence.gpg] http://inverse.ca/downloads/PacketFence/debian/{release_minor} bookworm bookworm" > \ /etc/apt/sources.list.d/packetfence-pfconnector-remote.list -wget -q -O - https://inverse.ca/downloads/GPG_PUBLIC_KEY | sudo apt-key add - +---- + +Install and configure the connector + +[source,bash,subs="attributes"] +---- apt update apt install packetfence-pfconnector-remote /usr/local/pfconnector-remote/bin/pfconnector-configure @@ -135,10 +149,25 @@ PacketFence Connector released with PacketFence 12.0.0 was not packaged. 
In order to upgrade your PacketFence Connector to a packaged version, you need to run following commands: +[source,bash] +---- +apt-get update +apt install gnupg sudo curl +curl -fsSL https://inverse.ca/downloads/GPG_PUBLIC_KEY | gpg --dearmor -o /etc/apt/keyrings/packetfence.gpg +---- + +Create a file named [filename]`/etc/apt/sources.list.d/packetfence-pfconnector-remote.list`: + +// subs=attributes allow to substitute {release_minor} in code block [source,bash,subs="attributes"] ---- -echo 'deb http://inverse.ca/downloads/PacketFence/debian/{release_minor} bookworm bookworm' > \ +echo "deb [signed-by=/etc/apt/keyrings/packetfence.gpg] http://inverse.ca/downloads/PacketFence/debian/{release_minor} bookworm bookworm" > \ /etc/apt/sources.list.d/packetfence-pfconnector-remote.list +---- + + +[source,bash,subs="attributes"] +---- apt update apt install -y -o Dpkg::Options::="--force-confnew" packetfence-pfconnector-remote ---- @@ -157,10 +186,26 @@ systemctl restart packetfence-pfconnector-remote In order to upgrade PacketFence Connector, you need to run following commands: +[source,bash] +---- +apt-get update +apt install gnupg sudo curl +curl -fsSL https://inverse.ca/downloads/GPG_PUBLIC_KEY | gpg --dearmor -o /etc/apt/keyrings/packetfence.gpg +---- + +Create a file named [filename]`/etc/apt/sources.list.d/packetfence-pfconnector-remote.list`: + +// subs=attributes allow to substitute {release_minor} in code block [source,bash,subs="attributes"] ---- -echo 'deb http://inverse.ca/downloads/PacketFence/debian/{release_minor} bookworm bookworm' > \ +echo "deb [signed-by=/etc/apt/keyrings/packetfence.gpg] http://inverse.ca/downloads/PacketFence/debian/{release_minor} bookworm bookworm" > \ /etc/apt/sources.list.d/packetfence-pfconnector-remote.list +---- + +Upgrade + +[source,bash,subs="attributes"] +---- apt update apt upgrade ---- From 85586b3a682ce1798b7bf33056a6052a7678bd18 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 
24 Sep 2024 08:40:29 -0400 Subject: [PATCH 012/176] Fix/8302 APT PublicKey Missing (#8309) * Fix PF public key in ISO * Fix root password in iso * Revert "Fix root password in iso". User should be to set it during installation This reverts commit 823c053e42fa569d6807d3c2aae627bee5095888. * Wake up gitlab --- README.md | 1 - ci/debian-installer/postinst-debian-installer.sh | 6 +++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5717102b19e6..3e1c7d6b19f5 100644 --- a/README.md +++ b/README.md @@ -95,4 +95,3 @@ Licensed under the GNU General Public License v2. [mailing_lists]: https://packetfence.org/support/index.html#/community "Community Mailing Lists" - diff --git a/ci/debian-installer/postinst-debian-installer.sh b/ci/debian-installer/postinst-debian-installer.sh index 176757eec69f..0f0cf5084e6e 100644 --- a/ci/debian-installer/postinst-debian-installer.sh +++ b/ci/debian-installer/postinst-debian-installer.sh @@ -7,7 +7,11 @@ apt install packetfence -y sed -i '/^deb cdrom:/s/^/#/' /etc/apt/sources.list sed -i 's/#PermitRootLogin.*/PermitRootLogin yes/g' /etc/ssh/sshd_config sed -i 's/.*inverse\.ca.*//g' /etc/apt/sources.list -echo "deb http://inverse.ca/downloads/PacketFence/debian/${PF_VERSION} bookworm bookworm" > /etc/apt/sources.list.d/packetfence.list +apt-get update +apt install gnupg sudo curl +curl -fsSL https://inverse.ca/downloads/GPG_PUBLIC_KEY | gpg --dearmor -o /etc/apt/keyrings/packetfence.gpg +echo "deb [signed-by=/etc/apt/keyrings/packetfence.gpg] http://inverse.ca/downloads/PacketFence/debian/14.0 bookworm bookworm" > \ + /etc/apt/sources.list.d/packetfence.list echo "SET PASSWORD FOR root@'localhost' = PASSWORD('');" > /tmp/reset-root.sql mkdir /run/mysqld chown mysql: /run/mysqld/ From dddc7f98d3b6c82b29d18e249e96f2005ce39909 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 24 Sep 2024 08:41:12 -0400 Subject: [PATCH 013/176] Fix/8300 Remove radiusd from 
building in docker and use the one on repos (#8305) * Clean container/radiusd dockerfile to keep essential and install from repo * Move radiusd to previous stage and change ci dependencies * Update radius version to install * CI wake up --- .gitlab-ci.yml | 64 ++++++++++++++++++++++++++++++----- containers/radiusd/Dockerfile | 58 ++----------------------------- 2 files changed, 58 insertions(+), 64 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4821d4e6c98e..0d5399c60a60 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -630,7 +630,6 @@ pfdeb_based_dev: - "httpd.aaa" - "httpd.admin_dispatcher" - "httpd.webservices" - - "radiusd" - "pfsetacls" - "pfsso" - "pfperl-api" @@ -661,7 +660,20 @@ rad_based_dev: extends: - .build_img_container_job_dev - .build_img_container_devel_rules - needs: ["pfdeb_based_dev"] + needs: ["pfdeb_dev"] + variables: + IMAGE_TAGS: "${CI_COMMIT_REF_SLUG},latest" + parallel: + # /!\ Be sure to update this list in all other matrix /!\ + matrix: + - IMAGE_NAME: + - "radiusd" + +rad_extend_dev: + extends: + - .build_img_container_job_dev + - .build_img_container_devel_rules + needs: ["rad_based_dev"] variables: IMAGE_TAGS: "${CI_COMMIT_REF_SLUG},latest" parallel: @@ -713,7 +725,6 @@ pfdeb_based_br_maint: - "httpd.aaa" - "httpd.admin_dispatcher" - "httpd.webservices" - - "radiusd" - "pfsetacls" - "pfsso" - "pfperl-api" @@ -744,7 +755,20 @@ rad_based_br_maint: extends: - .build_img_container_job_br_maint - .build_img_container_branches_and_maintenance_rules - needs: ["pfdeb_based_br_maint"] + needs: ["pfdeb_br_maint"] + variables: + IMAGE_TAGS: ${CI_COMMIT_REF_SLUG} + parallel: + # /!\ Be sure to update this list in all other matrix /!\ + matrix: + - IMAGE_NAME: + - "radiusd" + +rad_extend_br_maint: + extends: + - .build_img_container_job_br_maint + - .build_img_container_branches_and_maintenance_rules + needs: ["rad_based_br_maint"] variables: IMAGE_TAGS: ${CI_COMMIT_REF_SLUG} parallel: @@ -796,7 +820,6 @@ pfdeb_based_cloud_nac: 
- "httpd.aaa" - "httpd.admin_dispatcher" - "httpd.webservices" - - "radiusd" - "pfsetacls" - "pfsso" - "pfperl-api" @@ -827,7 +850,20 @@ rad_based_cloud_nac: extends: - .build_img_container_job_cloud_nac - .build_img_container_cloud_nac_rules - needs: ["pfdeb_based_cloud_nac"] + needs: ["pfdeb_cloud_nac"] + variables: + IMAGE_TAGS: ${CI_COMMIT_REF_SLUG}-${CI_PIPELINE_ID} + parallel: + # /!\ Be sure to update this list in all other matrix /!\ + matrix: + - IMAGE_NAME: + - "radiusd" + +rad_extend_cloud_nac: + extends: + - .build_img_container_job_cloud_nac + - .build_img_container_cloud_nac_rules + needs: ["rad_based_cloud_nac"] variables: IMAGE_TAGS: ${CI_COMMIT_REF_SLUG}-${CI_PIPELINE_ID} parallel: @@ -879,7 +915,6 @@ pfdeb_based_rel: - "httpd.aaa" - "httpd.admin_dispatcher" - "httpd.webservices" - - "radiusd" - "pfsetacls" - "pfsso" - "pfperl-api" @@ -910,7 +945,20 @@ rad_based_rel: extends: - .build_img_container_job_rel - .release_only_rules - needs: ["pfdeb_based_rel"] + needs: ["pfdeb_rel"] + variables: + IMAGE_TAGS: ${CI_COMMIT_TAG} + parallel: + # /!\ Be sure to update this list in all other matrix /!\ + matrix: + - IMAGE_NAME: + - "radiusd" + +rad_extend_rel: + extends: + - .build_img_container_job_rel + - .release_only_rules + needs: ["rad_based_rel"] variables: IMAGE_TAGS: ${CI_COMMIT_TAG} parallel: diff --git a/containers/radiusd/Dockerfile b/containers/radiusd/Dockerfile index e42fbe261d7e..6ae212513612 100644 --- a/containers/radiusd/Dockerfile +++ b/containers/radiusd/Dockerfile @@ -1,65 +1,11 @@ -ARG from=debian:bookworm ARG KNK_REGISTRY_URL ARG IMAGE_TAG - -FROM ${from} as build - -ARG DEBIAN_FRONTEND=noninteractive - -# -# Install build tools -# -RUN apt-get -qq update -RUN apt-get -qq install -y devscripts equivs git quilt gcc libcollectdclient-dev - -# -# Create build directory -# -RUN mkdir -p /usr/local/src/repositories -WORKDIR /usr/local/src/repositories - -# -# Shallow clone the FreeRADIUS source -# -ARG 
source=https://github.com/inverse-inc/freeradius-server.git -ARG release=feature/PacketFence_3.2.6 - -RUN git clone -qq --depth 1 --single-branch --branch ${release} ${source} -WORKDIR freeradius-server - -# -# Install build dependencies -# -RUN git checkout ${release}; \ - if [ -e ./debian/control.in ]; then \ - debian/rules debian/control; \ - fi; \ - echo 'y' | mk-build-deps -irt'apt-get -yV' debian/control - -# -# Build the server -# -# RUN make -j2 deb >/dev/null || make -j2 deb -RUN make -j2 deb - -# -# Clean environment and run the server -# FROM ${KNK_REGISTRY_URL}/pfdebian:${IMAGE_TAG} -# Copy debian packages -COPY --from=build /usr/local/src/repositories/*.deb /tmp/ - -RUN apt-get -qq -y remove freeradius-common - - RUN apt-get -qq update \ && apt-get clean \ - && apt-get -qq install -y /tmp/*.deb \ - && apt-get clean \ - && rm -r /var/lib/apt/lists/* /tmp/*.deb \ - \ - && ln -s /etc/freeradius /etc/raddb + && apt-get -qq install -y freeradius=3:3.2.6+git \ + && apt-get clean WORKDIR /usr/local/pf/ From da4ca4a0efcdb045320308d7a99fe0d361f10f42 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Tue, 24 Sep 2024 12:09:28 +0000 Subject: [PATCH 014/176] Update .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 920d65af6a1d..94140619ff63 100644 --- a/.gitignore +++ b/.gitignore @@ -49,6 +49,7 @@ conf/mfa.conf conf/proxysql.conf conf/uploads conf/kafka.conf +conf/config.toml db/upgrade-tenant-11.2-12.0.sql bin/pfcmd bin/ntlm_auth_wrapper From ad3eac92a41df2c86abab313bd1b0d81cf269f13 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 24 Sep 2024 10:25:26 -0400 Subject: [PATCH 015/176] Add upgrade, limitation and information for Debian12 and el8 in documentation (#8313) * Add upgrade, limitation and information for Debian12 and el8 * Fix bullet indentation, fix missing link texts * Add few extra explaination for debian 12 and PacketFence 14.0 * Fix link and add text in 
link for import export mechanism * Fix el8 documentation for mariadb-backup in pf14.0 * Upgrade EL8 Documentation * update wording * Fix links * Move upgrade from 8.0 to 9.3 in archived upgrade notes * Fix titles and indentations * Fix Title indentation on archived notes --------- Co-authored-by: Darren+Satkunas --- docs/PacketFence_Upgrade_Guide.asciidoc | 622 +++++------------- .../automation_of_upgrades.asciidoc | 12 +- .../export_import_mechanism.asciidoc | 6 +- .../archived_upgrade_notes.asciidoc | 479 +++++++++++++- 4 files changed, 646 insertions(+), 473 deletions(-) diff --git a/docs/PacketFence_Upgrade_Guide.asciidoc b/docs/PacketFence_Upgrade_Guide.asciidoc index 52abcdda2f32..9e8a5d8f30d6 100644 --- a/docs/PacketFence_Upgrade_Guide.asciidoc +++ b/docs/PacketFence_Upgrade_Guide.asciidoc @@ -138,467 +138,6 @@ Follow instructions related to <>, more specifically the <>. -== Upgrading from a version prior to 8.0.0 - - -=== Realms upgrade - - -The way PacketFence detects if the realm is stripped out of the username when performing authentication and authorisation has been moved to the realms. Moreover, it is now configurable based on the context (login on the captive portal or administration interface, as well as when performing authorization in RADIUS 802.1x) - -In order to migrate the configuration, use the following script to help guide you through the migration: - - /usr/local/pf/addons/upgrade/to-8.0-authentication-conf.pl - -=== Fingerbank v2 - - -*Device names* - -Packetfence now uses Fingerbank v2 for improved device profiling. Since this new version brings new device names, a rename of the current data is necessary. - -Rename the current data: - -[source,bash] ----- -/usr/local/pf/addons/upgrade/to-8.0-fingerbank-db-data.pl ----- - -*Mandatory Fingerbank API key* - -Fingerbank no longer releases a signature database and now uses an API for device profiling. 
In order for device profiling to continue working, there must be a Fingerbank API key configured in PacketFence. - -In order to do so, you should make sure you have the following in `/usr/local/fingerbank/conf/fingerbank.conf` - -NOTE: In order to request an API key, you can visit the following URL: https://api.fingerbank.org/users/register - -``` -[upstream] -api_key=YOUR_API_KEY_GOES_HERE -``` - -WARNING: Fingerbank v1 and v2 *do not* use the same infrastructure. The accounts (API keys) created on fingerbank.inverse.ca before the 8.0 release have been migrated to api.fingerbank.org. Still, you should make sure that you have the correct API key configured in fingerbank.conf by looking at your profile on https://api.fingerbank.org/users/register. If you have a corporate account, then you can safely assume its been migrated, you can email fingerbank@inverse.ca for a confirmation. If you use a Github account and you have tried Fingerbank v2 prior to the PacketFence 8.0 release, *then your API key will be different*. Make sure you update fingerbank.conf in that case. - -If you manage a large scale environment, you'll want to make sure your account can perform an unlimited amount of API requests on Fingerbank so that device profiling works correctly in a consistent way. In order to obtain this, contact fingerbank@inverse.ca. Note that most Inverse customers are entitled to free unlimited usage of the Fingerbank Cloud API. - -=== Changes to the default switch roles - - -The default roles that were returned using "Role by Switch Role" have been removed. If you were relying on them to be returned in the RADIUS response, then you need to add them back in the default switch in the 'Roles' tab. 
- -The previous values were: - - * `registration`: `registration` - * `isolation`: `isolation` - * `macDetection`: `macDetection` - * `inline`: `inline` - * `voice`: `voice` - -This is should only be necessary if you are using ACL assignment on your switches and using the default names that were there in PacketFence before. - -=== Removal of the graphite database - - -PacketFence doesn't use graphite anymore for its dashboard. It is recommended to delete the graphite database although this is purely optional. - -In order to do so, execute the following: - - mysql -u root -p -e "drop database pf_graphite" - -=== Changes to DNS filters - - -The $qname parameter need to be removed from dns_filters.conf - -In order to do so, execute the following command: - - sed -i -e 's/\$qname//g' /usr/local/pf/conf/dns_filters.conf - -=== Database schema update (all Linux distributions) - - -Changes have been made to the database schema. You will need to update it accordingly. -An SQL upgrade script has been provided to upgrade the database from the 7.4 schema to 8.0. - -To upgrade the database schema, run the following command: - - mysql -u root -p pf -v < /usr/local/pf/db/upgrade-7.4.0-8.0.0.sql - -Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.0.0). - -== Upgrading from a version prior to 8.1.0 - - -=== Changes on unreg_on_accounting_stop parameter - - -The global configuration parameter unreg_on_acct_stop has been moved in the connection profile. -So if you enabled it then make sure to enable it now in the connection profile. - - -=== Database schema update (all Linux distributions) - - -Changes have been made to the database schema. You will need to update it accordingly. -An SQL upgrade script has been provided to upgrade the database from the 7.4 schema to 8.0. 
- -To upgrade the database schema, run the following command: - - mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.0.0-8.1.0.sql - -Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.1.0). - -== Upgrading from a version prior to 8.2.0 - - -=== Queue Stats maintenance job removal - -The queue_stats maintenance job has been deprecated in favor of using pfstats. In order to remove configuration related to this maintenance job, run: - - /usr/local/pf/addons/upgrade/to-8.2-pfmon-conf.pl - -=== Upgrade pfdetect Perl regex to the go RE2 regex - -The pfdetect was moved from Perl to Go so all rule regexes have to be converted to the RE2 regex syntax. -RE2 is mostly is compatiable the Perl regex syntax. -More information on the RE2 syntax can be found here https://github.com/google/re2/wiki/Syntax. -To upgrade the regex run: - - /usr/local/pf/addons/upgrade/to-8.2-pfdetect-conf.pl - -Any Perl regex that cannnot be convert will be displayed and should be fixed. - -=== Upgrade realm.conf to be tenant aware - - -The realms are now multi-tenant aware, in order to upgrade your configuration to have the existing realms use the default tenant, execute the following script: - - /usr/local/pf/addons/upgrade/to-8.2-realm-conf.pl - - -=== The api_user table has been deprecated - - -Any users in that were in the api_user table should be migrated to PacketFence local account (password table) - -=== Upgrade pf user privileges - - -Starting from 8.2, stored routines will be dump *with* the PacketFence database. -The user created at the installation ('pf' by default) in database need to -have additional privileges to do that task. 
- -To upgrade the privileges of that user, run the following command: - - /usr/local/pf/addons/upgrade/to-8.2-upgrade-pf-privileges.sh - -=== Update connection_type from WIRED_MAC_AUTH to Ethernet-NoEAP - -We merged the WIRED_MAC_AUTH and Ethernet-NoEAP to Ethernet-NoEAP so the configuration needs to be updated, to do that run: - - sed -i "s/WIRED_MAC_AUTH/Ethernet-NoEAP/g" /usr/local/pf/conf/profiles.conf /usr/local/pf/conf/vlan_filters.conf /usr/local/pf/conf/radius_filters.conf /usr/local/pf/conf/switch_filters.conf /usr/local/pf/conf/authentication.conf - -=== Database schema - - -Changes have been made to the database schema. You will need to update it accordingly. -An SQL upgrade script has been provided to upgrade the database from the 8.1 schema to 8.2. - -To upgrade the database schema, run the following command: - - mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.1.0-8.2.0.sql - -Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.2.0). - -== Upgrading from a version prior to 8.3.0 - - -=== Upgrade pf.conf to rename configuration parameters - - -We moved radius_authentication_methods section to radius_configuration and moved all the radius configuration parameters in this new section. -To upgrade your configuration execute the following script: - - /usr/local/pf/addons/upgrade/to-8.3-rename-pf-conf-parameters.pl - - -=== Upgrade authentication.conf to add searchattributes parameter - - -We added a new parameter in AD and LDAP authentication sources to be able to do 802.1x authentication with any unique ldap attributes. -This parameter "searchattributes" need to be added in the existing authentication sources. 
-To apply this configuration execute the following script: - - /usr/local/pf/addons/upgrade/to-8.3-authentication-searchattributes.pl - -=== Adjustment to the encoding of the configuration files and templates - - -Configuration and templates in the admin were previously being saved as latin1 instead of utf8. - -This script will convert all latin1 config file to utf8 - - /usr/local/pf/addons/upgrade/to-8.3-conf-latin1-to-utf8.sh - -=== Database schema - - -Changes have been made to the database schema. You will need to update it accordingly. -An SQL upgrade script has been provided to upgrade the database from the 8.2 schema to 8.3. - -To upgrade the database schema, run the following command: - - mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.2.0-8.3.0.sql - -Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.3.0). - -== Upgrading from a version prior to 9.0.0 - - -=== Support for Debian 8 dropped - - -Debian 8 will not be supported anymore for versions 9.0.0 and above. You should instead use Debian 9 now as it is currently the only supported Debian version. - -=== Necessity to use MariaDB - - -NOTE: This only applies to users using an external database server. If your database is hosted on the same server as PacketFence whether you are in cluster or standalone, this requires no attention. - -Users hosting an external database for PacketFence will need to run a recent version of MariaDB as it will be the only supported database backend. Failure to use MariaDB may result in errors in the database migration script. - -In order to migrate to MariaDB, it is suggested to create a new database server and perform an export of the data through mysqldump and import it in the new server. - -The recommended MariaDB version for PacketFence is currently 10.1.21 - -A recent version of MySQL can also work but going forward, the only tested database engine will be MariaDB. 
- -=== Deprecate the classic dhcp filters - - -The previous dhcp filters engine has been deprecated in favor of the new one who is able to modify -the dhcp answer on the fly. - -=== Violations have been renamed to Security Events - - -The violations have been renamed to security events. In order to make the appropriate changes in your configuration, execute the following script: - - /usr/local/pf/addons/upgrade/to-9.0-security-events.sh - -=== Removed MAC detection setting - - -The MAC detection setting in the switches has been removed. In order to cleanup the switches configuration for the removal of this setting, execute the following script: - - /usr/local/pf/addons/upgrade/to-9.0-remove_mac_detection.sh - -=== Modifications to accounting cleanup - - -Accounting cleanup is now done via a pfmon task (acct_cleanup) instead of the database backup and maintenance script. Make sure you adjust the cleanup window in pfmon's configuration (Configuration->System Maintenance->Maintenance) if necessary. Also note that the default retention for the accounting data has been lowered to 1 day instead of 1 week like it was before. - -=== Admin roles configuration - - -In order to upgrade the Admin rights, run the following commands - - cd /usr/local/pf - sed -i "s/SERVICES/SERVICES_READ/g" /usr/local/pf/conf/adminroles.conf - sed -i "s/REPORTS/REPORTS_READ/g" /usr/local/pf/conf/adminroles.conf - -=== Database schema - - -Changes have been made to the database schema. You will need to update it accordingly. -An SQL upgrade script has been provided to upgrade the database from the 8.3 schema to 9.0. - -To upgrade the database schema, run the following command: - - mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.3.0-9.0.0.sql - -Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 9.0.0). 
- - -== Upgrading from a version prior to 9.1.0 - - -=== Now possible to disable a domain - - -In order to add the necessary enabled flag to your existing domains, run the following command: - - /usr/local/pf/addons/upgrade/to-9.1-add-domain-conf.pl - -=== pfperl-api port - - -The port of the pfperl-api service has changed, in order to adjust the existing configuration, run the following command: - - /usr/local/pf/addons/upgrade/to-9.1-update-api.conf.sh - -=== Linkedin OAuth2 - - -The LinkedIn API calls have changed drastically. -On top of the new LinkedIn modules that are part of the update, you will need to change the following parameter in all your existing LinkedIn sources: - - API URL of logged user -> https://api.linkedin.com/v2/emailAddress?q=members&projection=(elements*(handle~)) - -=== VLAN pool configuration - - -The VLAN pool strategy configuration has been moved to the connection profiles. - -In order to migrate the current setting of pf.conf into profiles.conf, you will need to run the following command: - - /usr/local/pf/addons/upgrade/to-9.1-move-vlan-pool-technique-parameter.pl - -=== Remove Useragent Triggers - - -The useragent and user_agent security event triggers have been deprecated. Performing HTTP User-Agent based detection is extremelly inefficient given the very dynamic nature of HTTP User-Agents. You should instead be using the device trigger which leverages the device profiling performed by Fingerbank. In order to remove any existing useragent trigger, execute the following script: - - /usr/local/pf/addons/upgrade/to-9.1-security-events-remove-useragent.pl - -=== Self service portal - -The device registration configuration file has been removed in favor of using a configuration file for all the self service portal features (status page + device registration). 
- -In order to migrate your configuration, run the following script: - -[source,bash] ----- -/usr/local/pf/addons/upgrade/to-9.1-selfservice-conf.pl ----- - -=== Password of the day rotation - -Password of the day source now uses access duration values to rotate password. - -In order to migrate your configuration, run the following script: - -[source,bash] ----- -/usr/local/pf/addons/upgrade/to-9.1-update-potd.pl ----- - - -=== Database schema - - -Changes have been made to the database schema. You will need to update it accordingly. -An SQL upgrade script has been provided to upgrade the database from the 9.0 schema to 9.1. - -To upgrade the database schema, run the following command: - - mysql -u root -p pf -v < /usr/local/pf/db/upgrade-9.0.0-9.1.0.sql - -Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 9.1.0). - -== Upgrading from a version prior to 9.2.0 - - -=== Merge of all RPM packages into one (RHEL / CentOS only) - -NOTE: This step needs to be done *before* packages upgrade. - -Starting from now, PacketFence will be released as an unique RPM package for -`x86_64` architectures. To remove properly older RPM packages, you need to follow these steps: - -. Follow instructions mentioned in <<_stop_all_packetfence_services,Stop all PacketFence services>> section and stop before starting packages upgrades -. Uninstall old RPM without running post-uninstallation steps: -+ -[source,bash] ----- -rpm -e --nodeps --noscripts packetfence-config - -# run only if packetfence-remote-arp-sensor has been installed -rpm -e --nodeps --noscripts packetfence-remote-arp-sensor ----- -+ - -. Recopy previous [filename]`pfconfig.conf` filename to its original location: -+ -[source,bash] ----- -mv -f /usr/local/pf/conf/pfconfig.conf.rpmsave /usr/local/pf/conf/pfconfig.conf ----- -+ - -. 
Upgrade PacketFence packages by following instructions in <<_packages_upgrades,Packages upgrades>> section for RHEL / CentOS based systems -. Continue upgrade procedure - -At the end of upgrade procedure, you should have only one RPM package called -[package]`packetfence`. If you previously installed -[package]`packetfence-release` package in order to have PacketFence repository -installed, this one has been upgraded to latest version. - -=== New GPG key for Debian installations (Debian only) - -NOTE: This step needs to be done *before* packages upgrade. - -In order to install new versions of Debian packages, you will need to add a new GPG key to your system: - -[source,bash] ----- -wget -O - https://inverse.ca/downloads/GPG_PUBLIC_KEY | sudo apt-key add - ----- - -You can safely remove the oldest one: -[source,bash] ----- -sudo apt-key del FE9E84327B18FF82B0378B6719CDA6A9810273C4 ----- - -=== Database schema - -Changes have been made to the database schema. You will need to update it accordingly. -An SQL upgrade script has been provided to upgrade the database from the 9.1 schema to 9.2. - -To upgrade the database schema, run the following command: - -[source,bash] ----- -mysql -u root -p pf -v < /usr/local/pf/db/upgrade-9.1.0-9.2.0.sql ----- - -Once completed, update the file [filename]`/usr/local/pf/conf/currently-at` to match the new release number (PacketFence 9.2.0): - -[source,bash] ----- -cat /usr/local/pf/conf/pf-release > /usr/local/pf/conf/currently-at ----- - -== Upgrading from a version prior to 9.3.0 - -=== Execute script action doesn't use sudo anymore - -Execute script action in security events doesn't use [command]`sudo` anymore to run scripts. -Consequently, you should ensure that `pf` user is: - -* able to read and execute these scripts -* able to run commands inside these scripts (without `sudo`) - -=== Database schema - -Changes have been made to the database schema. You will need to update it accordingly. 
-An SQL upgrade script has been provided to upgrade the database from the 9.2 schema to 9.3. - -To upgrade the database schema, run the following command: - -[source,bash] ----- -mysql -u root -p pf -v < /usr/local/pf/db/upgrade-9.2.0-9.3.0.sql ----- - -Once completed, update the file [filename]`/usr/local/pf/conf/currently-at` to match the new release number (PacketFence 9.3.0): - -[source,bash] ----- -cat /usr/local/pf/conf/pf-release > /usr/local/pf/conf/currently-at ----- - == Upgrading from a version prior to 10.0.0 === Kernel development package @@ -1928,16 +1467,175 @@ Since 13.2 PacketFence has reworked the Cisco, Juniper and Meraki switch modules == Upgrading from a version prior to 14.0.0 - === Admin Role Since 14.0 PacketFence is able to receive events from the FleetDM servers, which allows PacketFence to detect policy violations or CVEs of devices managed by FleetDM. To add a new admin role to receive these events through the PacketFence API perform the following steps: [source,bash] + ---- /usr/local/pf/addons/upgrade/to-14.0-adds-admin-roles-fleetdm.pl ---- +=== RedHat EL8 + +In place upgrades are supported for Redhat EL8. You can follow up the current <>. + +=== Debian 12 + +PacketFence 14.0.0 has removed support for Debian 11 (bullseye) and added support for Debian 12 (bookworm). In place upgrades from Debian 11 to Debian 12 are not supported. A new operating system will need to be provisioned in order to migrate from either Debian 11 or RedHat EL8, to Debian 12. + +To simplify the upgrade process to PacketFence 14.0.0 and future versions, we utilize a custom export/import procedure. + +The mariadb-backup package is installed with a PacketFence cluster and can also be used with standalone. The mariadb-backup package should have the same major version as the mariadb-server package. 
+ +To know which package version of mariadb-backup is installed: + +---- +# Debian 11 +# /usr/bin/mariabackup --version +/usr/bin/mariabackup based on MariaDB server 10.5.24-MariaDB debian-linux-gnu (x86_64) + +# Debian 12 +# /usr/bin/mariabackup --version +/usr/bin/mariabackup based on MariaDB server 10.11.6-MariaDB debian-linux-gnu (x86_64) +---- + +If it is not installed follow the default export process at <>. + +Before continuing, be sure to read <>. + +==== Export database with mariadb-backup and Import to PacketFence 14.0 on Debian 12 + +PacketFence versions < 11.1 must upgrade to 11.1 before continuing. + +===== On Debian 11 from PacketFence version 11.1 to 13.2 + +Exporting from Debian 11 the mariadb-backup package (10.5) must be first updated to the current major version used in Debian 12 (10.11) in order to be Imported. + + +====== Install mariadb-backup version 10.11.6 + +---- +wget https://archive.mariadb.org/mariadb-10.11.6/repo/debian/pool/main/m/mariadb/mariadb-backup_10.11.6%2Bmaria~deb11_amd64.deb -O /root/mariadb-backup_10.11.6%2Bmaria~deb11_amd64.deb +dpkg-deb -xv /root/mariadb-backup_10.11.6%2Bmaria~deb11_amd64.deb /root/mariadb-backup +mv /root/mariadb-backup/usr/bin/mariabackup /usr/bin/mariabackup +mv /root/mariadb-backup/usr/bin/mbstream /usr/bin/mbstream +---- + +====== Backup database + +Backup using the following script where the database export is created using mariadb-backup (10.11) + +---- +/usr/local/pf/addons/backup-and-maintenance.sh +---- + + +====== Reinstall previous version of mariadb-backup (10.5) + +The new version of mariadb-backup can now be removed + +---- +apt reinstall mariadb-backup -y +---- + + +====== Export the backup + +---- +/usr/local/pf/addons/full-import/export.sh /tmp/export.tgz +---- + +====== Import the backup + +Import the backup on the new host using the <> procedure. 
+ + +===== On RedHat EL8 from PacketFence version 11.1 to 13.2 + +====== Backup database + +Backup using the following script where the database export is created using mariadb-backup (10.5). This backup is used to Import the database in the new host. + +---- +/usr/local/pf/addons/backup-and-maintenance.sh +---- + +Ensure the backup exists in /root/backup/packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream.gz + + +====== Export the configuration + +This export is only used to Import the configuration files in the new host. + +---- +/usr/local/pf/addons/full-import/export.sh /tmp/export.tgz +---- + + +====== Prepare backup on Debian 11 + +Restore the database backup into a new copy for mariabackup. + +---- +gunzip packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream.gz +mkdir -p /root/backup/restore/ +pushd /root/backup/restore/ +mv packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream /root/backup/restore/ +cd /root/backup/restore/ +mbstream -x < packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream +rm packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream +mariabackup --prepare --target-dir=./ +---- + + => SCP (copy) the restored files to the Debian 12 server + +---- +# create the restore directory +ssh root@DEBIAN_12_IP mkdir -p /root/backup/restore/ +scp /root/backup/restore/* root@DEBIAN_12_IP:/root/backup/restore/ +---- + + +====== Import the backup on Debian 12 + +---- +systemctl stop packetfence-mariadb +pkill -9 -f mariadbd || echo 1 > /dev/null +mv /var/lib/mysql/ "/var/lib/mysql-`date +%s`" +mkdir /var/lib/mysql +cd /root/backup/restore/ +mariabackup --innobackupex --defaults-file=/usr/local/pf/var/conf/mariadb.conf --move-back --force-non-empty-directories ./ +chown -R mysql: /var/lib/mysql +systemctl start packetfence-mariadb +mysql_upgrade -p +systemctl restart packetfence-mariadb +---- + + +====== Import the configuration files + +Import only the configuration files, do not import the database. 
+ +---- +/usr/local/pf/addons/full-import/import.sh --conf -f /tmp/export.tgz +---- + +The configuration and database are now migrated to the new host. + +If all goes well, you can restart services using <>. + + +====== Additional steps to build or rebuild a cluster + +If you want to build or rebuild a cluster, you need to follow instructions in <>. + +If your previous installation was a cluster, some steps may not be necessary +to do. Your export archive will contain your previous +[filename]`cluster.conf` file. + + +== Upgrading from a version prior to X.Y.Z == Archived upgrade notes diff --git a/docs/installation/automation_of_upgrades.asciidoc b/docs/installation/automation_of_upgrades.asciidoc index 89b03131a1bc..b514986bf3dd 100644 --- a/docs/installation/automation_of_upgrades.asciidoc +++ b/docs/installation/automation_of_upgrades.asciidoc @@ -18,7 +18,12 @@ endif::[] //== Automation of upgrades -This section covers automation of upgrades available since PacketFence 11.0.0. +This section covers automation of upgrades available since PacketFence 11.0.0 on the same OS. + +Note that in PacketFence 14.0.0, we changed the OS from Debian 11 to Debian 12. +So if you want to use PacketFence 14.0 on Debian 12, you will have to create a new Debian 12 VM +and then <> +from your previous setup to the new one. === Assumptions and limitations @@ -26,7 +31,10 @@ This section covers automation of upgrades available since PacketFence 11.0.0. 
upgrades must use the procedure described in the <> -* You can perform automated upgrades starting from PacketFence 11.0.0 +* You can perform automated upgrades starting: +** from PacketFence 11.0.0 to PacketFence 13.2.0 on Debian 11 +** after PacketFence 14.0.0 on Debian 12 +** after PacketFence 11.0.0 on RedHat 8 * A backup and an export of your configuration are performed before doing upgrade === Full upgrade (for PacketFence version 11.0.0 only - see next section for 11.1.0 and above) diff --git a/docs/installation/export_import_mechanism.asciidoc b/docs/installation/export_import_mechanism.asciidoc index 3796294ff99b..c1bcf76586fd 100644 --- a/docs/installation/export_import_mechanism.asciidoc +++ b/docs/installation/export_import_mechanism.asciidoc @@ -24,7 +24,11 @@ It can be used to automate parts of upgrades or to restore PacketFence installat === Assumptions and limitations * You can export on any PacketFence version above 10.3.0 -* You can import on any PacketFence version above 11.0.0 +* With export from 10.3.0, you can import on any PacketFence version after 11.0.0 except if you are using mariadb-backup for mysql backup. +* If you are using mariadb-backup and want to jump to Debian 12 (PacketFence 14.0 or later), you will need to: +** be at least on PacketFence version 11.0.0 +** follow these steps in order to backup your database with the right + <>. * The import process needs to be done on a **standalone** server. 
Restoring directly to clusters is currently unsupported ** NOTE: Once you restored to your standalone server, you can make it a cluster by joining other machines to it and creating your diff --git a/docs/upgrade-notes/archived_upgrade_notes.asciidoc b/docs/upgrade-notes/archived_upgrade_notes.asciidoc index 256d92692f1e..9626ce1571a6 100644 --- a/docs/upgrade-notes/archived_upgrade_notes.asciidoc +++ b/docs/upgrade-notes/archived_upgrade_notes.asciidoc @@ -1335,14 +1335,14 @@ Then, you must adjust the systemd default target so PacketFence doesn't start on You should then change your Corosync configuration for MariaDB and PacketFence to the following: - primitive MariaDB systemd:packetfence-mariadb \ - op start timeout=60s interval=0 \ - op stop timeout=60s interval=0 \ - op monitor interval=20s timeout=30s - primitive PacketFence systemd:packetfence \ - op start timeout=300s interval=0 \ - op stop timeout=300s interval=0 \ - op monitor interval=300s timeout=300s + primitive MariaDB systemd:packetfence-mariadb \ + op start timeout=60s interval=0 \ + op stop timeout=60s interval=0 \ + op monitor interval=20s timeout=30s + primitive PacketFence systemd:packetfence \ + op start timeout=300s interval=0 \ + op stop timeout=300s interval=0 \ + op monitor interval=300s timeout=300s === Upgrading from a version prior to 7.1.0 @@ -1497,3 +1497,466 @@ To upgrade the database schema, run the following command: mysql -u root -p pf -v < /usr/local/pf/db/upgrade-7.3.0-7.4.0.sql Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 7.4.0). + +=== Upgrading from a version prior to 8.0.0 + + +==== Realms upgrade + + +The way PacketFence detects if the realm is stripped out of the username when performing authentication and authorisation has been moved to the realms. 
Moreover, it is now configurable based on the context (login on the captive portal or administration interface, as well as when performing authorization in RADIUS 802.1x) + +In order to migrate the configuration, use the following script to help guide you through the migration: + + /usr/local/pf/addons/upgrade/to-8.0-authentication-conf.pl + +==== Fingerbank v2 + + +*Device names* + +Packetfence now uses Fingerbank v2 for improved device profiling. Since this new version brings new device names, a rename of the current data is necessary. + +Rename the current data: + +[source,bash] +---- +/usr/local/pf/addons/upgrade/to-8.0-fingerbank-db-data.pl +---- + +*Mandatory Fingerbank API key* + +Fingerbank no longer releases a signature database and now uses an API for device profiling. In order for device profiling to continue working, there must be a Fingerbank API key configured in PacketFence. + +In order to do so, you should make sure you have the following in `/usr/local/fingerbank/conf/fingerbank.conf` + +NOTE: In order to request an API key, you can visit the following URL: https://api.fingerbank.org/users/register + +``` +[upstream] +api_key=YOUR_API_KEY_GOES_HERE +``` + +WARNING: Fingerbank v1 and v2 *do not* use the same infrastructure. The accounts (API keys) created on fingerbank.inverse.ca before the 8.0 release have been migrated to api.fingerbank.org. Still, you should make sure that you have the correct API key configured in fingerbank.conf by looking at your profile on https://api.fingerbank.org/users/register. If you have a corporate account, then you can safely assume its been migrated, you can email fingerbank@inverse.ca for a confirmation. If you use a Github account and you have tried Fingerbank v2 prior to the PacketFence 8.0 release, *then your API key will be different*. Make sure you update fingerbank.conf in that case. 
+
+If you manage a large scale environment, you'll want to make sure your account can perform an unlimited amount of API requests on Fingerbank so that device profiling works correctly in a consistent way. In order to obtain this, contact fingerbank@inverse.ca. Note that most Inverse customers are entitled to free unlimited usage of the Fingerbank Cloud API.
+
+==== Changes to the default switch roles
+
+
+The default roles that were returned using "Role by Switch Role" have been removed. If you were relying on them to be returned in the RADIUS response, then you need to add them back in the default switch in the 'Roles' tab.
+
+The previous values were:
+
+ * `registration`: `registration`
+ * `isolation`: `isolation`
+ * `macDetection`: `macDetection`
+ * `inline`: `inline`
+ * `voice`: `voice`
+
+This should only be necessary if you are using ACL assignment on your switches and using the default names that were there in PacketFence before.
+
+==== Removal of the graphite database
+
+
+PacketFence doesn't use graphite anymore for its dashboard. It is recommended to delete the graphite database although this is purely optional.
+
+In order to do so, execute the following:
+
+    mysql -u root -p -e "drop database pf_graphite"
+
+==== Changes to DNS filters
+
+
+The $qname parameter needs to be removed from dns_filters.conf
+
+In order to do so, execute the following command:
+
+    sed -i -e 's/\$qname//g' /usr/local/pf/conf/dns_filters.conf
+
+==== Database schema update (all Linux distributions)
+
+
+Changes have been made to the database schema. You will need to update it accordingly.
+An SQL upgrade script has been provided to upgrade the database from the 7.4 schema to 8.0.
+
+To upgrade the database schema, run the following command:
+
+    mysql -u root -p pf -v < /usr/local/pf/db/upgrade-7.4.0-8.0.0.sql
+
+Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.0.0).
+
+=== Upgrading from a version prior to 8.1.0
+
+
+==== Changes on unreg_on_accounting_stop parameter
+
+
+The global configuration parameter unreg_on_acct_stop has been moved in the connection profile.
+So if you enabled it then make sure to enable it now in the connection profile.
+
+
+==== Database schema update (all Linux distributions)
+
+
+Changes have been made to the database schema. You will need to update it accordingly.
+An SQL upgrade script has been provided to upgrade the database from the 8.0 schema to 8.1.
+
+To upgrade the database schema, run the following command:
+
+    mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.0.0-8.1.0.sql
+
+Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.1.0).
+
+=== Upgrading from a version prior to 8.2.0
+
+
+==== Queue Stats maintenance job removal
+
+The queue_stats maintenance job has been deprecated in favor of using pfstats. In order to remove configuration related to this maintenance job, run:
+
+    /usr/local/pf/addons/upgrade/to-8.2-pfmon-conf.pl
+
+==== Upgrade pfdetect Perl regex to the Go RE2 regex
+
+The pfdetect was moved from Perl to Go so all rule regexes have to be converted to the RE2 regex syntax.
+RE2 is mostly compatible with the Perl regex syntax.
+More information on the RE2 syntax can be found here https://github.com/google/re2/wiki/Syntax.
+To upgrade the regex run:
+
+    /usr/local/pf/addons/upgrade/to-8.2-pfdetect-conf.pl
+
+Any Perl regex that cannot be converted will be displayed and should be fixed.
+ +==== Upgrade realm.conf to be tenant aware + + +The realms are now multi-tenant aware, in order to upgrade your configuration to have the existing realms use the default tenant, execute the following script: + + /usr/local/pf/addons/upgrade/to-8.2-realm-conf.pl + + +==== The api_user table has been deprecated + + +Any users in that were in the api_user table should be migrated to PacketFence local account (password table) + +==== Upgrade pf user privileges + + +Starting from 8.2, stored routines will be dump *with* the PacketFence database. +The user created at the installation ('pf' by default) in database need to +have additional privileges to do that task. + +To upgrade the privileges of that user, run the following command: + + /usr/local/pf/addons/upgrade/to-8.2-upgrade-pf-privileges.sh + +==== Update connection_type from WIRED_MAC_AUTH to Ethernet-NoEAP + +We merged the WIRED_MAC_AUTH and Ethernet-NoEAP to Ethernet-NoEAP so the configuration needs to be updated, to do that run: + + sed -i "s/WIRED_MAC_AUTH/Ethernet-NoEAP/g" /usr/local/pf/conf/profiles.conf /usr/local/pf/conf/vlan_filters.conf /usr/local/pf/conf/radius_filters.conf /usr/local/pf/conf/switch_filters.conf /usr/local/pf/conf/authentication.conf + +==== Database schema + + +Changes have been made to the database schema. You will need to update it accordingly. +An SQL upgrade script has been provided to upgrade the database from the 8.1 schema to 8.2. + +To upgrade the database schema, run the following command: + + mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.1.0-8.2.0.sql + +Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.2.0). + +=== Upgrading from a version prior to 8.3.0 + + +==== Upgrade pf.conf to rename configuration parameters + + +We moved radius_authentication_methods section to radius_configuration and moved all the radius configuration parameters in this new section. 
+To upgrade your configuration execute the following script: + + /usr/local/pf/addons/upgrade/to-8.3-rename-pf-conf-parameters.pl + + +==== Upgrade authentication.conf to add searchattributes parameter + + +We added a new parameter in AD and LDAP authentication sources to be able to do 802.1x authentication with any unique ldap attributes. +This parameter "searchattributes" need to be added in the existing authentication sources. +To apply this configuration execute the following script: + + /usr/local/pf/addons/upgrade/to-8.3-authentication-searchattributes.pl + +==== Adjustment to the encoding of the configuration files and templates + + +Configuration and templates in the admin were previously being saved as latin1 instead of utf8. + +This script will convert all latin1 config file to utf8 + + /usr/local/pf/addons/upgrade/to-8.3-conf-latin1-to-utf8.sh + +==== Database schema + + +Changes have been made to the database schema. You will need to update it accordingly. +An SQL upgrade script has been provided to upgrade the database from the 8.2 schema to 8.3. + +To upgrade the database schema, run the following command: + + mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.2.0-8.3.0.sql + +Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 8.3.0). + +=== Upgrading from a version prior to 9.0.0 + + +==== Support for Debian 8 dropped + + +Debian 8 will not be supported anymore for versions 9.0.0 and above. You should instead use Debian 9 now as it is currently the only supported Debian version. + +==== Necessity to use MariaDB + + +NOTE: This only applies to users using an external database server. If your database is hosted on the same server as PacketFence whether you are in cluster or standalone, this requires no attention. + +Users hosting an external database for PacketFence will need to run a recent version of MariaDB as it will be the only supported database backend. 
Failure to use MariaDB may result in errors in the database migration script.
+
+In order to migrate to MariaDB, it is suggested to create a new database server and perform an export of the data through mysqldump and import it in the new server.
+
+The recommended MariaDB version for PacketFence is currently 10.1.21.
+
+A recent version of MySQL can also work but going forward, the only tested database engine will be MariaDB.
+
+==== Deprecate the classic dhcp filters
+
+
+The previous dhcp filters engine has been deprecated in favor of the new one which is able to modify
+the dhcp answer on the fly.
+
+==== Violations have been renamed to Security Events
+
+
+The violations have been renamed to security events. In order to make the appropriate changes in your configuration, execute the following script:
+
+    /usr/local/pf/addons/upgrade/to-9.0-security-events.sh
+
+==== Removed MAC detection setting
+
+
+The MAC detection setting in the switches has been removed. In order to clean up the switches configuration for the removal of this setting, execute the following script:
+
+    /usr/local/pf/addons/upgrade/to-9.0-remove_mac_detection.sh
+
+==== Modifications to accounting cleanup
+
+
+Accounting cleanup is now done via a pfmon task (acct_cleanup) instead of the database backup and maintenance script. Make sure you adjust the cleanup window in pfmon's configuration (Configuration->System Maintenance->Maintenance) if necessary. Also note that the default retention for the accounting data has been lowered to 1 day instead of 1 week like it was before.
+
+==== Admin roles configuration
+
+
+In order to upgrade the Admin rights, run the following commands
+
+    cd /usr/local/pf
+    sed -i "s/SERVICES/SERVICES_READ/g" /usr/local/pf/conf/adminroles.conf
+    sed -i "s/REPORTS/REPORTS_READ/g" /usr/local/pf/conf/adminroles.conf
+
+==== Database schema
+
+
+Changes have been made to the database schema. You will need to update it accordingly.
+An SQL upgrade script has been provided to upgrade the database from the 8.3 schema to 9.0.
+
+To upgrade the database schema, run the following command:
+
+    mysql -u root -p pf -v < /usr/local/pf/db/upgrade-8.3.0-9.0.0.sql
+
+Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 9.0.0).
+
+
+=== Upgrading from a version prior to 9.1.0
+
+
+==== Now possible to disable a domain
+
+
+In order to add the necessary enabled flag to your existing domains, run the following command:
+
+    /usr/local/pf/addons/upgrade/to-9.1-add-domain-conf.pl
+
+==== pfperl-api port
+
+
+The port of the pfperl-api service has changed, in order to adjust the existing configuration, run the following command:
+
+    /usr/local/pf/addons/upgrade/to-9.1-update-api.conf.sh
+
+==== LinkedIn OAuth2
+
+
+The LinkedIn API calls have changed drastically.
+On top of the new LinkedIn modules that are part of the update, you will need to change the following parameter in all your existing LinkedIn sources:
+
+    API URL of logged user -> https://api.linkedin.com/v2/emailAddress?q=members&projection=(elements*(handle~))
+
+==== VLAN pool configuration
+
+
+The VLAN pool strategy configuration has been moved to the connection profiles.
+
+In order to migrate the current setting of pf.conf into profiles.conf, you will need to run the following command:
+
+    /usr/local/pf/addons/upgrade/to-9.1-move-vlan-pool-technique-parameter.pl
+
+==== Remove Useragent Triggers
+
+
+The useragent and user_agent security event triggers have been deprecated. Performing HTTP User-Agent based detection is extremely inefficient given the very dynamic nature of HTTP User-Agents. You should instead be using the device trigger which leverages the device profiling performed by Fingerbank.
In order to remove any existing useragent trigger, execute the following script: + + /usr/local/pf/addons/upgrade/to-9.1-security-events-remove-useragent.pl + +==== Self service portal + +The device registration configuration file has been removed in favor of using a configuration file for all the self service portal features (status page + device registration). + +In order to migrate your configuration, run the following script: + +[source,bash] +---- +/usr/local/pf/addons/upgrade/to-9.1-selfservice-conf.pl +---- + +==== Password of the day rotation + +Password of the day source now uses access duration values to rotate password. + +In order to migrate your configuration, run the following script: + +[source,bash] +---- +/usr/local/pf/addons/upgrade/to-9.1-update-potd.pl +---- + + +==== Database schema + + +Changes have been made to the database schema. You will need to update it accordingly. +An SQL upgrade script has been provided to upgrade the database from the 9.0 schema to 9.1. + +To upgrade the database schema, run the following command: + + mysql -u root -p pf -v < /usr/local/pf/db/upgrade-9.0.0-9.1.0.sql + +Once completed, update the file /usr/local/pf/conf/currently-at to match the new release number (PacketFence 9.1.0). + +=== Upgrading from a version prior to 9.2.0 + + +==== Merge of all RPM packages into one (RHEL / CentOS only) + +NOTE: This step needs to be done *before* packages upgrade. + +Starting from now, PacketFence will be released as an unique RPM package for +`x86_64` architectures. To remove properly older RPM packages, you need to follow these steps: + +. Follow instructions mentioned in <<_stop_all_packetfence_services,Stop all PacketFence services>> section and stop before starting packages upgrades +. 
Uninstall old RPM without running post-uninstallation steps: ++ +[source,bash] +---- +rpm -e --nodeps --noscripts packetfence-config + +# run only if packetfence-remote-arp-sensor has been installed +rpm -e --nodeps --noscripts packetfence-remote-arp-sensor +---- ++ + +. Recopy previous [filename]`pfconfig.conf` filename to its original location: ++ +[source,bash] +---- +mv -f /usr/local/pf/conf/pfconfig.conf.rpmsave /usr/local/pf/conf/pfconfig.conf +---- ++ + +. Upgrade PacketFence packages by following instructions in <<_packages_upgrades,Packages upgrades>> section for RHEL / CentOS based systems +. Continue upgrade procedure + +At the end of upgrade procedure, you should have only one RPM package called +[package]`packetfence`. If you previously installed +[package]`packetfence-release` package in order to have PacketFence repository +installed, this one has been upgraded to latest version. + +==== New GPG key for Debian installations (Debian only) + +NOTE: This step needs to be done *before* packages upgrade. + +In order to install new versions of Debian packages, you will need to add a new GPG key to your system: + +[source,bash] +---- +wget -O - https://inverse.ca/downloads/GPG_PUBLIC_KEY | sudo apt-key add - +---- + +You can safely remove the oldest one: +[source,bash] +---- +sudo apt-key del FE9E84327B18FF82B0378B6719CDA6A9810273C4 +---- + +==== Database schema + +Changes have been made to the database schema. You will need to update it accordingly. +An SQL upgrade script has been provided to upgrade the database from the 9.1 schema to 9.2. 
+ +To upgrade the database schema, run the following command: + +[source,bash] +---- +mysql -u root -p pf -v < /usr/local/pf/db/upgrade-9.1.0-9.2.0.sql +---- + +Once completed, update the file [filename]`/usr/local/pf/conf/currently-at` to match the new release number (PacketFence 9.2.0): + +[source,bash] +---- +cat /usr/local/pf/conf/pf-release > /usr/local/pf/conf/currently-at +---- + +=== Upgrading from a version prior to 9.3.0 + +==== Execute script action doesn't use sudo anymore + +Execute script action in security events doesn't use [command]`sudo` anymore to run scripts. +Consequently, you should ensure that `pf` user is: + +* able to read and execute these scripts +* able to run commands inside these scripts (without `sudo`) + +==== Database schema + +Changes have been made to the database schema. You will need to update it accordingly. +An SQL upgrade script has been provided to upgrade the database from the 9.2 schema to 9.3. + +To upgrade the database schema, run the following command: + +[source,bash] +---- +mysql -u root -p pf -v < /usr/local/pf/db/upgrade-9.2.0-9.3.0.sql +---- + +Once completed, update the file [filename]`/usr/local/pf/conf/currently-at` to match the new release number (PacketFence 9.3.0): + +[source,bash] +---- +cat /usr/local/pf/conf/pf-release > /usr/local/pf/conf/currently-at +---- + + From 87c8faaf195edcc04bcdfe73db858b8f62486178 Mon Sep 17 00:00:00 2001 From: Satkunas <3904468+satkunas@users.noreply.github.com> Date: Tue, 24 Sep 2024 13:19:16 -0400 Subject: [PATCH 016/176] Fix/8314 (#8322) * add default tiemstamp in radius_audit_log, fixes #8314 * Add created_at on Insert --------- Co-authored-by: Darren+Satkunas Co-authored-by: James Rouzier --- db/pf-schema-X.Y.sql | 2 +- db/upgrade-X.X-X.Y.sql | 3 +++ go/cron/flush_radius_audit_log_job.go | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/db/pf-schema-X.Y.sql b/db/pf-schema-X.Y.sql index 46ecce4a44ed..29f4f630b06b 100644 --- a/db/pf-schema-X.Y.sql +++ 
b/db/pf-schema-X.Y.sql @@ -1105,7 +1105,7 @@ CREATE TABLE pf_version (`id` INT NOT NULL PRIMARY KEY, `version` VARCHAR(11) NO CREATE TABLE radius_audit_log ( `id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, - `created_at` TIMESTAMP NOT NULL, + `created_at` timestamp NOT NULL DEFAULT current_timestamp(), `mac` char(17) NOT NULL, `ip` varchar(255) NULL, `computer_name` varchar(255) NULL, diff --git a/db/upgrade-X.X-X.Y.sql b/db/upgrade-X.X-X.Y.sql index f9e65c5b2af8..0de7815f93f2 100644 --- a/db/upgrade-X.X-X.Y.sql +++ b/db/upgrade-X.X-X.Y.sql @@ -64,6 +64,9 @@ ALTER TABLE `pki_certs` DROP INDEX IF EXISTS `subject`, ADD UNIQUE KEY IF NOT EXISTS `cn_serial` (`cn`,`serial_number`) USING HASH; +\! echo "Adding default timestamp to RADIUS audit logs"; +ALTER TABLE radius_audit_log MODIFY created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP; + \! echo "Incrementing PacketFence schema version..."; INSERT IGNORE INTO pf_version (id, version, created_at) VALUES (@VERSION_INT, CONCAT_WS('.', @MAJOR_VERSION, @MINOR_VERSION), NOW()); diff --git a/go/cron/flush_radius_audit_log_job.go b/go/cron/flush_radius_audit_log_job.go index dd2c6a8819f1..e462f3e7a7f8 100644 --- a/go/cron/flush_radius_audit_log_job.go +++ b/go/cron/flush_radius_audit_log_job.go @@ -161,7 +161,7 @@ func (j *FlushRadiusAuditLogJob) buildQuery(entries [][]interface{}) (string, [] sql := ` INSERT INTO radius_audit_log ( - mac, ip, computer_name, + created_at, mac, ip, computer_name, user_name, stripped_user_name, realm, event_type, switch_id, switch_mac, switch_ip_address, radius_source_ip_address, called_station_id, calling_station_id, @@ -175,7 +175,7 @@ INSERT INTO radius_audit_log radius_reply, request_time, radius_ip ) VALUES ` - bind := "(?" + strings.Repeat(",?", RADIUS_AUDIT_LOG_COLUMN_COUNT-1) + ")" + bind := "(NOW(), ?" 
+ strings.Repeat(",?", RADIUS_AUDIT_LOG_COLUMN_COUNT-1) + ")" sql += bind + strings.Repeat(","+bind, len(entries)-1) args := make([]interface{}, 0, len(entries)*RADIUS_AUDIT_LOG_COLUMN_COUNT) for _, e := range entries { From 2a6c513542e2782fdb652233f8e08fa6adcd6b81 Mon Sep 17 00:00:00 2001 From: Zhihao Ma Date: Tue, 24 Sep 2024 17:34:58 -0400 Subject: [PATCH 017/176] revert import-reformat --- bin/pyntlm_auth/handlers.py | 20 ++++++++++---------- bin/pyntlm_auth/rpc.py | 19 ++++++++----------- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/bin/pyntlm_auth/handlers.py b/bin/pyntlm_auth/handlers.py index 655763ef4e27..f1e4cd7e794f 100644 --- a/bin/pyntlm_auth/handlers.py +++ b/bin/pyntlm_auth/handlers.py @@ -1,17 +1,17 @@ -import binascii -import hashlib -import re from http import HTTPStatus - -from flask import request, g -from samba import ntstatus - -import flags -import global_vars -import ms_event +from flask import Flask, request, g import ncache +import re +import hashlib +import binascii +import json +import utils +import ms_event +import global_vars import rpc +import flags +from samba import param, NTSTATUSError, ntstatus # For NTSTATUS, see: # https://github.com/samba-team/samba/blob/master/libcli/util/ntstatus_err_table.txt diff --git a/bin/pyntlm_auth/rpc.py b/bin/pyntlm_auth/rpc.py index 4bf888ef0fcc..425e65a6cf67 100644 --- a/bin/pyntlm_auth/rpc.py +++ b/bin/pyntlm_auth/rpc.py @@ -1,17 +1,14 @@ -import binascii -import datetime -import random - -from samba import param, NTSTATUSError +import global_vars +import config_generator +from samba import param, NTSTATUSError, ntstatus from samba.credentials import Credentials, DONT_USE_KERBEROS -from samba.dcerpc import netlogon from samba.dcerpc.misc import SEC_CHAN_WKSTA -from samba.dcerpc.netlogon import (netr_Authenticator, MSV1_0_ALLOW_WORKSTATION_TRUST_ACCOUNT, MSV1_0_ALLOW_MSVCHAPV2) - -import config_generator -import global_vars +from samba.dcerpc import netlogon import utils - 
+import datetime +from samba.dcerpc.netlogon import (netr_Authenticator, MSV1_0_ALLOW_WORKSTATION_TRUST_ACCOUNT, MSV1_0_ALLOW_MSVCHAPV2) +import binascii +import random def init_secure_connection(): netbios_name = global_vars.c_netbios_name From c57e0f0284b502a5aff80af63f638277fcca6176 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 25 Sep 2024 07:50:21 -0400 Subject: [PATCH 018/176] Fix packetfence maintenance branch to match related tag --- ci/debian-installer/Makefile | 2 +- ci/debian-installer/build-and-upload.sh | 9 +++++++++ ci/packer/zen/build-and-upload.sh | 9 +++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/ci/debian-installer/Makefile b/ci/debian-installer/Makefile index ea921b5adb34..f4e472018583 100644 --- a/ci/debian-installer/Makefile +++ b/ci/debian-installer/Makefile @@ -1,4 +1,4 @@ -PF_VERSION=$(CI_COMMIT_REF_SLUG) +PF_VERSION=$(CI_COMMIT_REF_NAME) RESULT_DIR=results .PHONY: packetfence-debian-installer.iso diff --git a/ci/debian-installer/build-and-upload.sh b/ci/debian-installer/build-and-upload.sh index 2e84592b7a5c..5ec0425f9e9a 100755 --- a/ci/debian-installer/build-and-upload.sh +++ b/ci/debian-installer/build-and-upload.sh @@ -3,6 +3,15 @@ set -o nounset -o pipefail -o errexit PF_VERSION=${PF_VERSION:-localtest} +# Fix PF version if maintenance to match tag +if [[ "$PF_VERSION" =~ ^maintenance\/([0-9]+\.[0-9]+)$ ]]; +then + PF_VERSION=v; + PF_VERSION+=${BASH_REMATCH[1]}; + PF_VERSION+=.0; + echo "Maintenance Branch detected, try to match tag version with PF version = $PF_VERSION" +fi + PF_RELEASE="`echo $PF_RELEASE | sed -r 's/.*\b([0-9]+\.[0-9]+)\.[0-9]+/\1/g'`" ISO_NAME=PacketFence-ISO-${PF_VERSION}.iso diff --git a/ci/packer/zen/build-and-upload.sh b/ci/packer/zen/build-and-upload.sh index 92d734e8dac4..4b1efa2b0e48 100755 --- a/ci/packer/zen/build-and-upload.sh +++ b/ci/packer/zen/build-and-upload.sh @@ -1,6 +1,15 @@ #!/bin/bash set -o nounset -o pipefail -o errexit +# Fix 
PF version if maintenance to match tag +if [[ "$PF_VERSION" =~ ^maintenance\/([0-9]+\.[0-9]+)$ ]]; +then + PF_VERSION=v; + PF_VERSION+=${BASH_REMATCH[1]}; + PF_VERSION+=.0; + echo "Maintenance Branch detected, try to match tag version with PF version = $PF_VERSION" +fi + VM_NAME=${VM_NAME:-vm} VBOX_RESULT_DIR=${VBOX_RESULT_DIR:-results/virtualbox} From 0fa070c0a9105e47e8cbb4745668037c4e73de2e Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Wed, 25 Sep 2024 16:46:09 +0000 Subject: [PATCH 019/176] Update mac addresses of pfflow if they are in our system --- go/cron/aggregator.go | 56 +++++++++++++++++++++++++++++++++----- go/cron/aggregator_test.go | 2 ++ go/cron/pfflow.go | 49 +++++++++++++++++---------------- 3 files changed, 77 insertions(+), 30 deletions(-) diff --git a/go/cron/aggregator.go b/go/cron/aggregator.go index 23fcba53f09f..d09276938c9a 100644 --- a/go/cron/aggregator.go +++ b/go/cron/aggregator.go @@ -7,6 +7,8 @@ import ( "math" "net/netip" "time" + + "github.com/inverse-inc/go-utils/log" ) type EventKey struct { @@ -53,9 +55,46 @@ type Aggregator struct { db *sql.DB } +func updateMacs(ctx context.Context, f *PfFlow, stmt *sql.Stmt) { + if f.SrcMac != "00:00:00:00:00:00" && f.DstMac != "00:00:00:00:00:00" { + return + } + + var srcMac, dstMac string + err := stmt.QueryRowContext(ctx, f.SrcIp.String(), f.DstIp.String()).Scan(&srcMac, &dstMac) + if err != nil { + log.LogErrorf(ctx, "updateMacs Database Error: %s", err.Error()) + } + + if f.SrcMac == "00:00:00:00:00:00" { + f.SrcMac = srcMac + } + + if f.DstMac == "00:00:00:00:00:00" { + f.DstMac = dstMac + } +} + +const updateMacsSql = ` +SELECT + COALESCE((SELECT mac FROM ip4log WHERE ip = ?), "00:00:00:00:00:00") as src_mac, + COALESCE((SELECT mac FROM ip4log WHERE ip = ?), "00:00:00:00:00:00") as dst_mac; +` + func (a *Aggregator) handleEvents() { ctx := context.Background() ticker := time.NewTicker(a.timeout) + stmt, err := new(sql.Stmt), error(nil) + // if a.db != nil { + stmt, err = 
a.db.PrepareContext(ctx, updateMacsSql) + if err != nil { + log.LogErrorf(ctx, "handleEvents Database Error: %s %s", updateMacsSql, err.Error()) + stmt = nil + } else { + defer stmt.Close() + } + // } + loop: for { select { @@ -67,6 +106,11 @@ loop: if a.Heuristics > 0 { f.Heuristics() } + + if stmt != nil { + updateMacs(ctx, &f, stmt) + } + a.events[key] = append(val, f) } } @@ -76,13 +120,11 @@ loop: startTime := int64(math.MaxInt64) endTime := int64(0) packetCount := uint64(0) - networkEvent := events[0].ToNetworkEvent() - if networkEvent == nil { - for _, e := range events[1:] { - networkEvent = e.ToNetworkEvent() - if networkEvent != nil { - break - } + var networkEvent *NetworkEvent + for _, e := range events { + networkEvent = e.ToNetworkEvent() + if networkEvent != nil { + break } } diff --git a/go/cron/aggregator_test.go b/go/cron/aggregator_test.go index 861f494656b5..1696071ff5bb 100644 --- a/go/cron/aggregator_test.go +++ b/go/cron/aggregator_test.go @@ -8,6 +8,7 @@ import ( func TestAggregator(t *testing.T) { networkEventChan := make(chan []*NetworkEvent, 100) + db, _ := getDb() events := []*PfFlows{ { Flows: &[]PfFlow{ @@ -55,6 +56,7 @@ func TestAggregator(t *testing.T) { &AggregatorOptions{ Timeout: 10 * time.Millisecond, NetworkEventChan: networkEventChan, + Db: db, }, ) diff --git a/go/cron/pfflow.go b/go/cron/pfflow.go index f577a45181e9..d7410a744167 100644 --- a/go/cron/pfflow.go +++ b/go/cron/pfflow.go @@ -27,30 +27,33 @@ type PfFlows struct { //easyjson:json type PfFlow struct { - SrcIp netip.Addr `json:"src_ip"` - DstIp netip.Addr `json:"dst_ip"` - NextAddr netip.Addr `json:"next_addr"` - SrcMac string `json:"src_mac"` - DstMac string `json:"dst_mac"` + SrcIp netip.Addr `json:"src_ip,omitempty"` + DstIp netip.Addr `json:"dst_ip,omitempty"` + NextAddr netip.Addr `json:"next_addr,omitempty"` + SrcMac string `json:"src_mac,omitempty"` + DstMac string `json:"dst_mac,omitempty"` + PostSrcMac string `json:"post_src_mac,omitempty"` + PostDstMac 
string `json:"post_dst_mac,omitempty"` StartTime int64 `json:"start_time"` EndTime int64 `json:"end_time"` - ByteCount uint32 `json:"byte_count"` - First uint32 `json:"first"` - Last uint32 `json:"last"` - SrcPort uint16 `json:"src_port"` - DstPort uint16 `json:"dst_port"` - SnmpIndexInput uint16 `json:"snmp_index_input"` - SnmpIndexOutput uint16 `json:"snmp_index_output"` - PacketCount uint64 `json:"packet_count"` - SrcAS uint16 `json:"src_as"` - DstAS uint16 `json:"dst_as"` - TCPFlags uint8 `json:"tcp_flags"` - BiFlow uint8 `json:"biflow"` - Direction uint8 `json:"direction"` - Proto uint8 `json:"proto"` - SrcMask uint8 `json:"src_mask"` - DstMask uint8 `json:"dst_mask"` - ToS uint8 `json:"tos"` + PacketCount uint64 `json:"packet_count,omitempty"` + ConnectionCount uint64 `json:"connection_count,omitempty"` + ByteCount uint32 `json:"byte_count,omitempty"` + First uint32 `json:"first,omitempty"` + Last uint32 `json:"last,omitempty"` + SrcPort uint16 `json:"src_port,omitempty"` + DstPort uint16 `json:"dst_port,omitempty"` + SnmpIndexInput uint16 `json:"snmp_index_input,omitempty"` + SnmpIndexOutput uint16 `json:"snmp_index_output,omitempty"` + SrcAS uint16 `json:"src_as,omitempty"` + DstAS uint16 `json:"dst_as,omitempty"` + TCPFlags uint8 `json:"tcp_flags,omitempty"` + BiFlow uint8 `json:"biflow,omitempty"` + Direction uint8 `json:"direction,omitempty"` + Proto uint8 `json:"proto,omitempty"` + SrcMask uint8 `json:"src_mask,omitempty"` + DstMask uint8 `json:"dst_mask,omitempty"` + ToS uint8 `json:"tos,omitempty"` } func (f *PfFlow) Key(h *PfFlowHeader) EventKey { @@ -158,7 +161,7 @@ func (f *PfFlow) ToNetworkEvent() *NetworkEvent { IpProtocol: ipProto, IpVersion: IpVersionIpv4, EnforcementState: EnforcementStateEnforcing, - Count: 1, + Count: int(f.ConnectionCount), StartTime: uint64(time.Now().Unix()), Direction: f.NetworkEventDirection(), DestInventoryitem: f.DestInventoryitem(), From 9794df244d7001417e4d0fce6be7099fda0ef58a Mon Sep 17 00:00:00 2001 From: James 
Rouzier Date: Thu, 26 Sep 2024 05:45:15 +0000 Subject: [PATCH 020/176] Fix aggregation --- go/cron/aggregator.go | 14 +++++--- go/cron/aggregator_test.go | 66 ++++++++++++++++++++------------------ go/cron/pfflow.go | 6 ++++ 3 files changed, 50 insertions(+), 36 deletions(-) diff --git a/go/cron/aggregator.go b/go/cron/aggregator.go index d09276938c9a..050b138c875e 100644 --- a/go/cron/aggregator.go +++ b/go/cron/aggregator.go @@ -1,7 +1,6 @@ package maint import ( - "cmp" "context" "database/sql" "math" @@ -12,6 +11,8 @@ import ( ) type EventKey struct { + DomainID uint32 + FlowSeq uint32 SrcIp netip.Addr DstIp netip.Addr DstPort uint16 @@ -119,7 +120,7 @@ loop: for _, events := range a.events { startTime := int64(math.MaxInt64) endTime := int64(0) - packetCount := uint64(0) + connectionCount := uint64(0) var networkEvent *NetworkEvent for _, e := range events { networkEvent = e.ToNetworkEvent() @@ -136,11 +137,14 @@ loop: for _, e := range events { startTime = min(startTime, e.StartTime) endTime = max(endTime, e.EndTime) - ports[e.SessionKey()] = struct{}{} - packetCount += cmp.Or(e.PacketCount, 1) + sessionKey := e.SessionKey() + if _, ok := ports[sessionKey]; !ok { + ports[sessionKey] = struct{}{} + connectionCount += e.ConnectionCount + } } - networkEvent.Count = len(ports) + networkEvent.Count = int(connectionCount) if startTime != 0 { networkEvent.StartTime = uint64(startTime) } diff --git a/go/cron/aggregator_test.go b/go/cron/aggregator_test.go index 1696071ff5bb..0368b9d28621 100644 --- a/go/cron/aggregator_test.go +++ b/go/cron/aggregator_test.go @@ -11,42 +11,46 @@ func TestAggregator(t *testing.T) { db, _ := getDb() events := []*PfFlows{ { + Header: PfFlowHeader{ + DomainID: 1, + FlowSeq: 1, + }, Flows: &[]PfFlow{ { - SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), - DstIp: netip.AddrFrom4([4]byte{1, 1, 1, 1}), - SrcPort: 80, - DstPort: 1025, - Proto: 6, - BiFlow: 2, - PacketCount: 1, + SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), + DstIp: 
netip.AddrFrom4([4]byte{1, 1, 1, 1}), + SrcPort: 80, + DstPort: 1025, + Proto: 6, + BiFlow: 2, + ConnectionCount: 2, }, { - SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 1}), - DstIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), - SrcPort: 1024, - DstPort: 80, - Proto: 6, - BiFlow: 1, - PacketCount: 1, + SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 1}), + DstIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), + SrcPort: 1024, + DstPort: 80, + Proto: 6, + BiFlow: 1, + ConnectionCount: 2, }, { - SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 1}), - DstIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), - SrcPort: 1024, - DstPort: 80, - Proto: 6, - BiFlow: 1, - PacketCount: 1, + SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), + DstIp: netip.AddrFrom4([4]byte{1, 1, 1, 1}), + SrcPort: 80, + DstPort: 1024, + Proto: 6, + BiFlow: 2, + ConnectionCount: 2, }, { - SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 1}), - DstIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), - SrcPort: 1025, - DstPort: 80, - Proto: 6, - BiFlow: 1, - PacketCount: 1, + SrcIp: netip.AddrFrom4([4]byte{1, 1, 1, 1}), + DstIp: netip.AddrFrom4([4]byte{1, 1, 1, 2}), + SrcPort: 1025, + DstPort: 80, + Proto: 6, + BiFlow: 1, + ConnectionCount: 2, }, }, }, @@ -67,8 +71,8 @@ func TestAggregator(t *testing.T) { t.Fatalf("Not aggreated to a single network event") } - if ne[0].Count != 2 { - t.Fatalf("Not aggreated properly") + if ne[0].Count != 4 { + t.Fatalf("Not aggreated properly got %d expected %d", ne[0].Count, 4) } if ne[0].DestPort != 80 { @@ -119,7 +123,7 @@ func TestAggregator(t *testing.T) { t.Fatalf("Not aggreated to a single network event") } - if ne[0].Count != 2 { + if ne[0].Count != 0 { t.Fatalf("Not aggreated properly") } diff --git a/go/cron/pfflow.go b/go/cron/pfflow.go index d7410a744167..5146c1dc5de9 100644 --- a/go/cron/pfflow.go +++ b/go/cron/pfflow.go @@ -60,6 +60,8 @@ func (f *PfFlow) Key(h *PfFlowHeader) EventKey { switch f.BiFlow { default: return EventKey{ + DomainID: h.DomainID, + FlowSeq: h.FlowSeq, SrcIp: f.SrcIp, DstIp: f.DstIp, DstPort: 
f.DstPort, @@ -68,6 +70,8 @@ func (f *PfFlow) Key(h *PfFlowHeader) EventKey { } case 1: return EventKey{ + DomainID: h.DomainID, + FlowSeq: h.FlowSeq, SrcIp: f.SrcIp, DstIp: f.DstIp, DstPort: f.DstPort, @@ -76,6 +80,8 @@ func (f *PfFlow) Key(h *PfFlowHeader) EventKey { } case 2: return EventKey{ + DomainID: h.DomainID, + FlowSeq: h.FlowSeq, DstIp: f.SrcIp, SrcIp: f.DstIp, DstPort: f.SrcPort, From b60ee4752146f0f0b38305d83655731a5da8f019 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:12:59 -0400 Subject: [PATCH 021/176] Add exportable backup for PF --- addons/backup-and-maintenance.sh | 182 ++++++++++++++++++------------- addons/exportable-backup.sh | 173 +++++++++++++++++++++++++++++ addons/full-import/export.sh | 33 +++--- 3 files changed, 297 insertions(+), 91 deletions(-) create mode 100755 addons/exportable-backup.sh diff --git a/addons/backup-and-maintenance.sh b/addons/backup-and-maintenance.sh index 02256723e6e1..4926f19e12e6 100755 --- a/addons/backup-and-maintenance.sh +++ b/addons/backup-and-maintenance.sh @@ -12,6 +12,65 @@ # Licensed under the GPL # +source /usr/local/pf/addons/functions/helpers.functions + +############################################################################# +### Help +############################################################################# + +help(){ + cat <<-EOF +$0 Backup a PacketFence instance + +Usage: $0 -f /path/to/backup_file.tgz [OPTION]... 
+ +Options: + -h,--help Display this help + --db Backup only database from PacketFence database + --conf Backup only configuration from PacketFence configuration +EOF +} + +############################################################################# +### Handle args +############################################################################# +do_full_backup=1 +do_db_backup=0 +do_config_backup=0 +do_replication=0 +mariabackup_installed=false + +# Parse option +TEMP=$(getopt -o f:h --long file:,help,db,conf \ + -n "$0" -- "$@") || (echo "getopt failed." && exit 1) + +# Note the quotes around `$TEMP': they are essential! +eval set -- "$TEMP" + +while true ; do + case "$1" in + -h|--help) + help ; exit 0 ; shift + ;; + --db) + do_db_backup=1 ; do_full_backup=0 ; shift + ;; + --conf) + do_config_backup=1 ; do_full_backup=0 ; shift + ;; + --) + shift ; break + ;; + *) + echo "Wrong usage !" ; help ; exit 1 + ;; + esac +done + +############################################################################# +### Variables +############################################################################# + NB_DAYS_TO_KEEP_DB=7 NB_DAYS_TO_KEEP_FILES=7 PF_DIRECTORY='/usr/local/pf/' @@ -23,72 +82,62 @@ REP_USER=$($PF_DIRECTORY/bin/get_pf_conf active_active galera_replication_userna REP_PWD=$($PF_DIRECTORY/bin/get_pf_conf active_active galera_replication_password) BACKUP_DIRECTORY=${BACKUP_DIRECTORY:-/root/backup/} BACKUP_DB_FILENAME='packetfence-db-dump' -BACKUP_PF_FILENAME='packetfence-files-dump' +BACKUP_CONF_FILENAME='packetfence-conf-dump' ARCHIVE_DIRECTORY=$BACKUP_DIRECTORY ARCHIVE_DB_FILENAME='packetfence-archive' MARIABACKUP_INSTALLED=0 BACKUPRC=1 -# For replication -ACTIVATE_REPLICATION=0 -REPLICATION_USER='' -NODE1_HOSTNAME='' -NODE2_HOSTNAME='' -NODE1_IP='' -NODE2_IP='' - -# to detect MariaDB remote DB -if [ "$DB_HOST" != "localhost" ] && [ "$DB_HOST" != "100.64.0.1" ]; then - MARIADB_REMOTE_CLUSTER=1 -else - MARIADB_REMOTE_CLUSTER=0 -fi - -# Create the backup directory 
-if [ ! -d "$BACKUP_DIRECTORY" ]; then - mkdir -p $BACKUP_DIRECTORY - echo -e "$BACKUP_DIRECTORY , created. \n" -else - echo -e "$BACKUP_DIRECTORY , folder already created. \n" -fi - -PF_USED_SPACE=`du -s $PF_DIRECTORY --exclude=logs --exclude=var | awk '{ print $1 }'` -BACKUPS_AVAILABLE_SPACE=`df --output=avail $BACKUP_DIRECTORY | awk 'NR == 2 { print $1 }'` - -if (( $BACKUPS_AVAILABLE_SPACE > (( $PF_USED_SPACE / 2 )) )); then - # Backup complete PacketFence installation except logs - current_tgz=$BACKUP_DIRECTORY/$BACKUP_PF_FILENAME-`date +%F_%Hh%M`.tgz - if [ ! -f $BACKUP_DIRECTORY$BACKUP_PF_FILENAME ]; then - tar -czf $current_tgz --exclude='logs/*' --exclude='var/*' --exclude='.git/*' --exclude='conf/certmanager/*' --directory $PF_DIRECTORY . - BACKUPRC=$? - if (( $BACKUPRC > 0 )); then - echo "ERROR: PacketFence files backup was not successful" >&2 - echo "ERROR: PacketFence files backup was not successful" > /usr/local/pf/var/backup_files.status - else - echo -e $BACKUP_PF_FILENAME "have been created in $BACKUP_DIRECTORY \n" - echo "OK" > /usr/local/pf/var/backup_files.status - find $BACKUP_DIRECTORY -name "packetfence-files-dump-*.tgz" -mtime +$NB_DAYS_TO_KEEP_FILES -print0 | xargs -0r rm -f - echo -e "$BACKUP_PF_FILENAME older than $NB_DAYS_TO_KEEP_FILES days have been removed. \n" - fi +die() { + echo "$(basename $0): $@" >&2 ; exit 1 +} + +create_backup_directory(){ + # Create the backup directory + if [ ! -d "$BACKUP_DIRECTORY" ]; then + mkdir -p $BACKUP_DIRECTORY + echo -e "$BACKUP_DIRECTORY , created. \n" else - echo -e $BACKUP_DIRECTORY$BACKUP_PF_FILENAME ", file already created. \n" + echo -e "$BACKUP_DIRECTORY , folder already created. \n" fi -else - echo "ERROR: There is not enough space in $BACKUP_DIRECTORY to safely backup files. Skipping the backup." >&2 - echo "ERROR: There is not enough space in $BACKUP_DIRECTORY to safely backup files. Skipping the backup." 
> /usr/local/pf/var/backup_files.status -fi +} -die() { - echo "$(basename $0): $@" >&2 ; exit 1 +should_backup_config(){ + PF_USED_SPACE=`du -s $PF_DIRECTORY --exclude=logs --exclude=var | awk '{ print $1 }'` + BACKUPS_AVAILABLE_SPACE=`df --output=avail $BACKUP_DIRECTORY | awk 'NR == 2 { print $1 }'` + + if (( $BACKUPS_AVAILABLE_SPACE > (( $PF_USED_SPACE / 2 )) )); then + # Backup complete PacketFence installation except logs + current_tgz=$BACKUP_DIRECTORY/$BACKUP_CONF_FILENAME-`date +%F_%Hh%M`.tgz + if [ ! -f $BACKUP_DIRECTORY$BACKUP_CONF_FILENAME ]; then + tar -czf $current_tgz --exclude='logs/*' --exclude='var/*' --exclude='.git/*' --exclude='conf/certmanager/*' --directory $PF_DIRECTORY . + BACKUPRC=$? + if (( $BACKUPRC > 0 )); then + echo "ERROR: PacketFence files backup was not successful" >&2 + echo "ERROR: PacketFence files backup was not successful" > /usr/local/pf/var/backup_files.status + else + echo -e $BACKUP_CONF_FILENAME "have been created in $BACKUP_DIRECTORY \n" + echo "OK" > /usr/local/pf/var/backup_files.status + find $BACKUP_DIRECTORY -name "packetfence-conf-dump-*.tgz" -mtime +$NB_DAYS_TO_KEEP_FILES -print0 | xargs -0r rm -f + echo -e "$BACKUP_CONF_FILENAME older than $NB_DAYS_TO_KEEP_FILES days have been removed. \n" + fi + else + echo -e $BACKUP_DIRECTORY$BACKUP_CONF_FILENAME ", file already created. \n" + fi + else + echo "ERROR: There is not enough space in $BACKUP_DIRECTORY to safely backup files. Skipping the backup." >&2 + echo "ERROR: There is not enough space in $BACKUP_DIRECTORY to safely backup files. Skipping the backup." > /usr/local/pf/var/backup_files.status + fi } -should_backup(){ +should_backup_database(){ # Default choices SHOULD_BACKUP=1 MARIADB_LOCAL_CLUSTER=0 MARIADB_DISABLE_GALERA=1 - if [ $MARIADB_REMOTE_CLUSTER -eq 1 ]; then + # to detect MariaDB remote DB + if [ "$DB_HOST" != "localhost" ] && [ "$DB_HOST" != "100.64.0.1" ]; then echo "Remote database detected: backup should be done on database server itself." 
exit $BACKUPRC fi @@ -107,12 +156,17 @@ should_backup(){ else echo -e "First server of the cluster : database backup will start.\n" fi - else + fi + # Is the database running on the current server and should we be running a backup ? + if [ $SHOULD_BACKUP -eq 1 ] && { [ -f /var/run/mysqld/mysqld.pid ] || [ -f /var/run/mariadb/mariadb.pid ] || [ -f /var/lib/mysql/`hostname`.pid ]; }; then echo "Database backup will start" + backup_database + else + echo "Nothing to do" fi } -backup_db(){ +backup_database(){ # Check to see if Mariabackup is installed if hash mariabackup 2>/dev/null; then echo -e "Mariabackup is available. Will proceed using it for DB backup to avoid locking tables and easier recovery process. \n" @@ -176,25 +230,7 @@ backup_db(){ fi } -should_backup -# Is the database running on the current server and should we be running a backup ? -if [ $SHOULD_BACKUP -eq 1 ] && { [ -f /var/run/mysqld/mysqld.pid ] || [ -f /var/run/mariadb/mariadb.pid ] || [ -f /var/lib/mysql/`hostname`.pid ]; }; then - backup_db -else - echo "Nothing to do" -fi - -# Replicate the db backups between both servers -if [ $ACTIVATE_REPLICATION == 1 ]; then - if [ $HOSTNAME == $NODE1_HOSTNAME ]; then - replicate_to=$NODE2_IP - elif [ $HOSTNAME == $NODE2_HOSTNAME ]; then - replicate_to=$NODE1_IP - else - echo "Cannot recognize hostname. This script is made for $NODE1_HOSTNAME and $NODE2_HOSTNAME. 
Exiting" >&2 - exit 1 - fi; - eval "rsync -auv -e ssh --delete --include '$BACKUP_DB_FILENAME*' --exclude='*' $BACKUP_DIRECTORY $REPLICATION_USER@$replicate_to:$BACKUP_DIRECTORY" -fi +should_backup_config +should_backup_database exit $BACKUPRC diff --git a/addons/exportable-backup.sh b/addons/exportable-backup.sh new file mode 100755 index 000000000000..71a166552186 --- /dev/null +++ b/addons/exportable-backup.sh @@ -0,0 +1,173 @@ +#!/bin/bash +# +# Backup of $PF_DIRECTORY and $DB_NAME that can be used in export/import procedure +# +# - compressed $PF_DIRECTORY to $BACKUP_DIRECTORY +# - compressed mysqldump to $BACKUP_DIRECTORY +# - prepare files for backup and exportation, rotate and clean +# +# Copyright (C) 2005-2024 Inverse inc. +# +# Author: Inverse inc. +# +# Licensed under the GPL +# + +source /usr/local/pf/addons/functions/helpers.functions + +BACKUP_DIRECTORY=${BACKUP_DIRECTORY:-/root/backup/} +BACKUP_DB_FILENAME='packetfence-db-dump' +BACKUP_CONF_FILENAME='packetfence-conf-dump' +BACKUP_OLD_CONF_FILENAME='packetfence-files-dump' +BACKUP_PF_FILENAME='packetfence-exportable-backup' +NB_DAYS_TO_KEEP_BACKUP=${NB_DAYS_TO_KEEP_BACKUP:-7} +BACKUPRC=1 + +############################################################################# +### Replicate +############################################################################# +replicate_backup(){ + REPLICATION_USER=${REPLICATION_USER:-root} + NODE1_HOSTNAME=${NODE1_HOSTNAME:-node1_hostname} + NODE2_HOSTNAME=${NODE2_HOSTNAME:-node2_hostname} + NODE1_IP=${NODE1_IP:-node1_ip_address} + NODE2_IP=${NODE2_IP:-node2_ip_address} + + if [ $HOSTNAME == $NODE1_HOSTNAME ]; then + replicate_to=$NODE2_IP + elif [ $HOSTNAME == $NODE2_HOSTNAME ]; then + replicate_to=$NODE1_IP + else + echo "Cannot recognize hostname. This script is made for $NODE1_HOSTNAME and $NODE2_HOSTNAME. 
Exiting" >&2 + exit 1 + fi; + eval "rsync -auv -e ssh --delete --include '$BACKUP_DB_FILENAME*' --exclude='*' $BACKUP_DIRECTORY $REPLICATION_USER@$replicate_to:$BACKUP_DIRECTORY" + exit $BACKUPRC +} + +############################################################################# +### Help +############################################################################# +help(){ + cat <<-EOF +$0 Backup a PacketFence instance + +Usage: $0 -f /path/to/backup_file.tgz [OPTION]... + +Options: + -f,--file Backup in a specific PacketFence (by default it will be under /root/backup/) + -h,--help Display this help + --db Backup only database from PacketFence database + --conf Backup only configuration from PacketFence configuration + --replication Replicate the backup accross two nodes. + +EOF +} + +############################################################################# +### Clean old exportable backup archive +############################################################################# +clean_old_backup_archive(){ + find $BACKUP_DIRECTORY -name "$BACKUP_PF_FILENAME-*.tgz" -mtime +$NB_DAYS_TO_KEEP_BACKUP -delete +} + +############################################################################# +### Disk space +############################################################################# +check_disk_space(){ + BACKUPS_AVAILABLE_SPACE=`df --output=avail $BACKUP_DIRECTORY | awk 'NR == 2 { print $1 }'` + MYSQL_USED_SPACE=`du -s /var/lib/mysql | awk '{ print $1 }'` + CONF_USED_SPACE=`du -s $PF_DIRECTORY --exclude=logs --exclude=var | awk '{ print $1 }'` + if (( $BACKUPS_AVAILABLE_SPACE < (( (( $MYSQL_USED_SPACE + $CONF_USED_SPACE )) /2 )) )); then + echo "There is not enough space in $BACKUP_DIRECTORY to safely backup exportable. Skipping backup." >&2 + echo "There is not enough space in $BACKUP_DIRECTORY to safely backup exportable. Skipping backup." 
> /usr/local/pf/var/backup_pf.status + fi + exit $BACKUPRC +} + + +create_backup_directory(){ + # Create the backup directory + if [ ! -d "$BACKUP_DIRECTORY" ]; then + mkdir -p $BACKUP_DIRECTORY + echo -e "$BACKUP_DIRECTORY , created. \n" + else + echo -e "$BACKUP_DIRECTORY , folder already created. \n" + fi +} + + +############################################################################# +### Cleaning +############################################################################# +clean_backup(){ + find $BACKUP_DIRECTORY -name "$BACKUP_PF_FILENAME-*.tgz" -mtime +$NB_DAYS_TO_KEEP_BACKUP -delete + find $BACKUP_DIRECTORY -name "$BACKUP_DB_FILENAME-*.sql.gz" -delete + find $BACKUP_DIRECTORY -name "$BACKUP_CONF_FILENAME-*.tgz" -delete + find $BACKUP_DIRECTORY -name "$BACKUP_OLD_CONF_FILENAME-*.tgz" -delete +} + +############################################################################# +### Handle args +############################################################################# +do_full_backup=1 +do_db_backup=0 +do_config_backup=0 +do_replication=0 +BACKUP_FILE=${BACKUP_FILE:-} + +# Parse option +TEMP=$(getopt -o f:h --long file:,help,db,conf,replication \ + -n "$0" -- "$@") || (echo "getopt failed." && exit 1) + +# Note the quotes around `$TEMP': they are essential! +eval set -- "$TEMP" + +while true ; do + case "$1" in + -f|--file) + # first shift is mandatory to get file path + shift + BACKUP_FILE="$1" ; shift + ;; + -h|--help) + help ; exit 0 ; shift + ;; + --db) + do_db_backup=1 ; do_full_backup=0 ; shift + ;; + --conf) + do_config_backup=1 ; do_full_backup=0 ; shift + ;; + --replication) + do_replication=1 ; shift + ;; + --) + shift ; break + ;; + *) + echo "Wrong usage !" ; help ; exit 1 + ;; + esac +done + +if [ -z "$BACKUP_FILE" ]; then + echo "Default directory $BACKUP_DIRECTORY will be used." 
+ BACKUP_FILE=$BACKUP_DIRECTORY/$BACKUP_CONF_FILENAME-`date +%F_%Hh%M`.tgz +fi + +############################################################################# +### Main +############################################################################# +/bin/bash /usr/local/pf/addons/backup-and-maintenance.sh +if [ ! -f $BACKUP_FILE ]; then + /bin/bash /usr/local/pf/addons/full-import/export.sh $BACKUP_FILE +else + echo -e $BACKUP_DIRECTORY$BACKUP_CONF_FILENAME ", file already created. \n" +fi +clean_backup +if [ $do_replication == 1 ]; then + replicate_backup +fi + +exit $BACKUPRC diff --git a/addons/full-import/export.sh b/addons/full-import/export.sh index 12b1ecb22b76..36b125e100a8 100755 --- a/addons/full-import/export.sh +++ b/addons/full-import/export.sh @@ -6,31 +6,28 @@ if [ -z "$1" ]; then exit 1 fi -if [ "$2" = "--force" ];then - echo "Force flag enabled" - mtime="" -else - mtime="-mtime -1" -fi - set -o nounset -o pipefail -o errexit source /usr/local/pf/addons/functions/helpers.functions source /usr/local/pf/addons/functions/database.functions output="$1" +BACKUP_DB_FILENAME='packetfence-db-dump-*' +BACKUP_CONF_FILENAME='packetfence-conf-dump-*' -db_dump=`find /root/backup -name 'packetfence-db-dump-*' -printf "%T@ %p\n" | sort -n | tail -1 | awk '{ print $2 }'` +echo "Search last database dump available." +last_db_dump=`find /root/backup -name $BACKUP_DB_FILENAME -printf "%T@ %p\n" | sort -n | tail -1 | awk '{ print $2 }'` -if [ -z "$db_dump" ]; then - echo "Unable to find a database dump that was done in the last 24 hours. Add --force to ignore this." +if [ -z "$last_db_dump" ]; then + echo "Unable to find a database dump." exit 1 fi -files_dump=`find /root/backup -name 'packetfence-files-dump-*' -printf "%T@ %p\n" | sort -n | tail -1 | awk '{ print $2 }'` +echo "Search last config dump available." 
+last_conf_dump=`find /root/backup -name $BACKUP_CONF_FILENAME -printf "%T@ %p\n" | sort -n | tail -1 | awk '{ print $2 }'` -if [ -z "$files_dump" ]; then - echo "Unable to find a files dump that was done in the last 24 hours. Add --force to ignore this." +if [ -z "$last_conf_dump" ]; then + echo "Unable to find a config dump." exit 1 fi @@ -46,12 +43,12 @@ pushd $build_dir main_splitter echo "Copying dump files to temporary export directory" -cp -a $db_dump $build_dir/ -cp -a $files_dump $build_dir/ +cp -a $last_db_dump $build_dir/ +cp -a $last_conf_dump $build_dir/ mariadb_args="" -if echo "$db_dump" | grep '\.sql.gz$' >/dev/null; then +if echo "$last_db_dump" | grep '\.sql.gz$' >/dev/null; then if ! test_db_connection_no_creds; then echo -n "Please enter the root password for MariaDB:" read -s mariadb_root_pass @@ -85,12 +82,12 @@ for f in $add_files; do done main_splitter -echo "Creating export archive" +echo "Creating exportable backup archive" tar -cvzf $output * check_code $? main_splitter -echo "Done exporting to $output" +echo "Done backuping to $output" popd > /dev/null From dafe939a1dcd26ae2b3938dc019a6b379fa6de59 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:21:20 -0400 Subject: [PATCH 022/176] Add exportable backup cron tasks --- debian/packetfence.cron.d | 2 +- packetfence.cron.d | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/packetfence.cron.d b/debian/packetfence.cron.d index 5eea0b9d654b..a3782adaf709 100644 --- a/debian/packetfence.cron.d +++ b/debian/packetfence.cron.d @@ -3,7 +3,7 @@ SHELL=/bin/sh PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin -30 0 * * * root /usr/local/pf/addons/backup-and-maintenance.sh +30 0 * * * root /usr/local/pf/addons/exportable-backup.sh # Renew any Let's Encrypt certificates the first of the month 1 0 1 * * root /usr/local/pf/bin/pfcmd renew_lets_encrypt diff --git a/packetfence.cron.d b/packetfence.cron.d index 
7146e3746eaf..c42d3fc247be 100644 --- a/packetfence.cron.d +++ b/packetfence.cron.d @@ -4,7 +4,7 @@ SHELL=/bin/sh PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin # Database backup and maintenance script -30 00 * * * root /usr/local/pf/addons/backup-and-maintenance.sh +30 00 * * * root /usr/local/pf/addons/exportable-backup.sh # Renew any Let's Encrypt certificates the first of the month 1 0 1 * * root /usr/local/pf/bin/pfcmd renew_lets_encrypt From b0930a88cf8de633a43ef80be863246586c66dc0 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:25:17 -0400 Subject: [PATCH 023/176] Add exportable backup in backup script --- addons/full-upgrade/run-upgrade.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/addons/full-upgrade/run-upgrade.sh b/addons/full-upgrade/run-upgrade.sh index 5b68b6dfa941..6a847e909ba0 100755 --- a/addons/full-upgrade/run-upgrade.sh +++ b/addons/full-upgrade/run-upgrade.sh @@ -232,8 +232,7 @@ echo "Stopping the PacketFence services" main_splitter export_to="/root/packetfence-pre-upgrade-backup-`date '+%s'`.tgz" echo "Generating full pre-upgrade backup to $export_to" -/usr/local/pf/addons/backup-and-maintenance.sh -/usr/local/pf/addons/full-import/export.sh $export_to +/usr/local/pf/addons/exportable-backup.sh -f $export_to main_splitter INCLUDE_OS_UPDATE="${INCLUDE_OS_UPDATE:-}" From 0197256fff3d0a6ced18d1e0e5b1ef5213eefa61 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:37:19 -0400 Subject: [PATCH 024/176] Fix venom backup restore DB test --- t/venom/test_suites/backup/backup_db.yml | 2 +- .../00_backup_db_and_restore.yml | 39 +++---------------- .../backup_db_and_restore/TESTSUITE.md | 8 +--- 3 files changed, 8 insertions(+), 41 deletions(-) diff --git a/t/venom/test_suites/backup/backup_db.yml b/t/venom/test_suites/backup/backup_db.yml index 60919974a50d..2d8c985e3d2c 100644 --- 
a/t/venom/test_suites/backup/backup_db.yml +++ b/t/venom/test_suites/backup/backup_db.yml @@ -3,4 +3,4 @@ testcases: - name: backup steps: - type: exec - script: /usr/local/pf/addons/backup-and-maintenance.sh + script: /usr/local/pf/addons/exportable-backup.sh diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index ea1c18641602..4a308198e75d 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -20,52 +20,23 @@ testcases: assertions: - result.statuscode ShouldEqual 201 -- name: backup +- name: create exportable backup steps: - type: exec - script: /usr/local/pf/addons/backup-and-maintenance.sh + script: /usr/local/pf/addons/exportable-backup.sh - name: get_backup_name steps: - type: exec - script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-db-dump-*.sql.gz" -newermt "-1 minute"' + script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-exportable-backup-*.tgz" -newermt "-1 minute"' vars: backup_name: from: result.systemout -- name: unzip_db_backup +- name: import steps: - type: exec - script: 'gunzip {{.get_backup_name.backup_name}}' - -# we only get filename without path -- name: get_backup_name_uncompressed - steps: - - type: exec - script: 'basename {{.get_backup_name.backup_name}} .gz' - vars: - backup_name_uncompressed: - from: result.systemout - -- name: drop_pf_db - steps: - - type: exec - script: mysql -e "DROP DATABASE pf;" - -- name: create_pf_db - steps: - - type: exec - script: mysql -e "CREATE DATABASE pf;" - -- name: restore_schema - steps: - - type: exec - script: 'mysql pf < /usr/local/pf/db/pf-schema.sql' - -- name: restore_db - steps: - - type: exec - script: 'mysql pf < {{.backup_db_and_restore.backup_dir}}/{{.get_backup_name_uncompressed.backup_name_uncompressed}}' + script: 
'/usr/local/pf/addons/full-import/import.sh --db -f {{ .backup_name }}' - name: search_user_in_db steps: diff --git a/t/venom/test_suites/backup_db_and_restore/TESTSUITE.md b/t/venom/test_suites/backup_db_and_restore/TESTSUITE.md index d56d1a7554d6..77d38f432678 100644 --- a/t/venom/test_suites/backup_db_and_restore/TESTSUITE.md +++ b/t/venom/test_suites/backup_db_and_restore/TESTSUITE.md @@ -5,12 +5,8 @@ MariaDB running and available using UNIX socket ## Scenario steps 1. Create user in DB using API -2. Backup files and DB with backup-and-maintenance script -3. Check DB file has been created by backup script -4. Unzip DB file -5. Drop DB -6. Recreate DB based on current schema -7. Restore data from backup DB file +2. Backup files and DB with exportable-backup script +7. Import only db from backup 8. Check that user created at first step is still here using API: validate that application is running after DB restore From 6631229d7674c0f2dd3c82d0df94fb7b4703263b Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:59:04 -0400 Subject: [PATCH 025/176] Update doc with exprtable backup --- docs/cluster/layer_3_clusters.asciidoc | 16 +++------------- docs/cluster/maintenance_and_operations.asciidoc | 11 ++++++----- .../export_import_mechanism.asciidoc | 9 +-------- .../performance_optimizations.asciidoc | 6 +++--- 4 files changed, 13 insertions(+), 29 deletions(-) diff --git a/docs/cluster/layer_3_clusters.asciidoc b/docs/cluster/layer_3_clusters.asciidoc index 1baa48459708..8576ea2f9914 100644 --- a/docs/cluster/layer_3_clusters.asciidoc +++ b/docs/cluster/layer_3_clusters.asciidoc @@ -416,7 +416,7 @@ First restart packetfence-mariadb on all of the servers in the main cluster. systemctl restart packetfence-mariadb -Run the /usr/local/pf/addons/backup-and-maintenance.sh script on the master node of the main cluster. 
If you do not know which server is the master run this command on all nodes in the main cluster and only the master will create a backup file (eg: /root/backup/packetfence-db-dump-innobackup-YYYY-MM-DD_HHhss.xbstream.gz). +Run the /usr/local/pf/addons/exportable-backup.sh script on the master node of the main cluster. If you do not know which server is the master run this command on all nodes in the main cluster and only the master will create a backup file (eg: /root/backup/packetfence-exportable-backup-YYYY-MM-DD_HHhss.tgz). Transfer this file to the remote server (eg: /root/backup/) Connect to the remote server and perform the following to sync the configuration from the master cluster: @@ -425,18 +425,8 @@ Connect to the remote server and perform the following to sync the configuration /usr/local/pf/bin/pfcmd configreload hard Then the following command to import the backup: - - mkdir /root/backup/restore - cd /root/backup/restore - cp ../packetfence-db-dump-innobackup-YYYY-MM-DD_HHhss.xbstream.gz . 
- gunzip packetfence-db-dump-innobackup-YYYY-MM-DD_HHhss.xbstream.gz - mbstream -x < packetfence-db-dump-innobackup-YYYY-MM-DD_HHhss.xbstream - mv packetfence-db-dump-innobackup-YYYY-MM-DD_HHhss.xbstream ../ - mariabackup --prepare --target-dir=./ - systemctl stop packetfence-mariadb - rm -fr /var/lib/mysql/* - mariabackup --innobackupex --defaults-file=/usr/local/pf/var/conf/mariadb.conf --move-back --force-non-empty-directories ./ - chown -R mysql: /var/lib/mysql + + /usr/local/pf/addons/full-import/import.sh --db -f /root/backup/packetfence-exportable-backup-YYYY-MM-DD_HHhss.tgz systemctl start packetfence-mariadb On the master node of the main cluster, grant replication for the replication user: diff --git a/docs/cluster/maintenance_and_operations.asciidoc b/docs/cluster/maintenance_and_operations.asciidoc index d2d0dcabcb90..c94e54b8cf20 100644 --- a/docs/cluster/maintenance_and_operations.asciidoc +++ b/docs/cluster/maintenance_and_operations.asciidoc @@ -146,20 +146,21 @@ If you need to externalize those backups, they are in: ---- /root/backup ---- -Files description: +File description: -* `packetfence-db-dump-innobackup-DATE_00h30.xbstream.gz` are the SQL dump of your MariaDB database. -* `packetfence-files-dump-DATE_00h30.tgz` are the dump of the PacketFence files. +* packetfence-exportable-backup-DATE_00h30.tgz is an exportable packetfence backup that contains: + * `packetfence-db-dump-innobackup-DATE_00h30.xbstream.gz` are the SQL dump of your MariaDB database. + * `packetfence-config-dump-DATE_00h30.tgz` are the dump of the PacketFence files. 
==== Manual backups In case you need to make a "manual" backup, you can type the following command: ---- -/usr/local/pf/addons/backup-and-maintenance.sh +/usr/local/pf/addons/exportable-backup.sh ---- -As the daily automatic backups, you will find the files in: +As the daily automatic backups, you will find the file in: ---- /root/backup/ diff --git a/docs/installation/export_import_mechanism.asciidoc b/docs/installation/export_import_mechanism.asciidoc index c1bcf76586fd..31af47a5b1bc 100644 --- a/docs/installation/export_import_mechanism.asciidoc +++ b/docs/installation/export_import_mechanism.asciidoc @@ -93,14 +93,7 @@ configuration in your export, run: [source,bash] ---- -/usr/local/pf/addons/backup-and-maintenance.sh ----- - -Next, run the export script: - -[source,bash] ----- -/usr/local/pf/addons/full-import/export.sh /tmp/export.tgz +/usr/local/pf/addons/exportable-backup.sh -f /tmp/export.tgz ---- The command above will create your export archive in [filename]`/tmp/export.tgz`. You will diff --git a/docs/installation/performance_optimizations.asciidoc b/docs/installation/performance_optimizations.asciidoc index 876cd8e77b29..08b90a3f275d 100644 --- a/docs/installation/performance_optimizations.asciidoc +++ b/docs/installation/performance_optimizations.asciidoc @@ -297,7 +297,7 @@ This will grind PacketFence to a halt so you want to avoid that at all cost. One ==== Using MariaDB-backup -When dealing with a large database, the database backup and maintenance script (`/usr/local/pf/addons/backup-and-maintenance.sh`) which uses mysqldump may create a long lock on your database which may cause service to hang. +When dealing with a large database, the database backup and maintenance script (`/usr/local/pf/addons/exportable-backup.sh`) which uses mysqldump may create a long lock on your database which may cause service to hang. This is fixed easily by using MariaDB-backup which can complete a full database backup without locking your tables. 
@@ -325,9 +325,9 @@ Once this is done, grant the proper rights to the `pf` user (or the one you conf MariaDB> GRANT PROCESS, RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO 'pf'@'localhost'; MariaDB> FLUSH PRIVILEGES; -Next, run the maintenance script [filename]`/usr/local/pf/addons/backup-and-maintenance.sh` and ensure that the following line is part of the output: +Next, run the maintenance script [filename]`/usr/local/pf/addons/exportable-backup.sh` and ensure that the following line is part of the output: - innobackupex: completed OK! + Exportable backup is done If the backup fails, check [filename]`/usr/local/pf/logs/innobackup.log` for details and refer to the MariaDB-backup documentation for troubleshooting. From 5a6197e45010f73782165586baad082ce748c009 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:59:39 -0400 Subject: [PATCH 026/176] Add end of script done --- addons/exportable-backup.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/addons/exportable-backup.sh b/addons/exportable-backup.sh index 71a166552186..85686b3eed37 100755 --- a/addons/exportable-backup.sh +++ b/addons/exportable-backup.sh @@ -169,5 +169,4 @@ clean_backup if [ $do_replication == 1 ]; then replicate_backup fi - -exit $BACKUPRC +echo "Exportable backup is done" From ab9be0873b1205f62e0c378a1496ad76d7f3f778 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Thu, 26 Sep 2024 19:24:03 +0000 Subject: [PATCH 027/176] Update tests --- t/unittest/UnifiedApi/Controller/Config/OPTIONS.t | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/t/unittest/UnifiedApi/Controller/Config/OPTIONS.t b/t/unittest/UnifiedApi/Controller/Config/OPTIONS.t index 8cab4a50d82d..782579892914 100755 --- a/t/unittest/UnifiedApi/Controller/Config/OPTIONS.t +++ b/t/unittest/UnifiedApi/Controller/Config/OPTIONS.t @@ -366,7 +366,7 @@ meta => { value => 'update_switch_role_network', sibling => { api_parameters => { - default => 
'mac, $mac, ip, $ip, mask, $mask, lease_length, $lease_length', + default => 'mac, $mac, ip, $ip', } }, } From 18ba3fe0be4b45ed5c2418a9ed44d606608f670e Mon Sep 17 00:00:00 2001 From: snyk-bot Date: Sat, 28 Sep 2024 08:40:46 +0000 Subject: [PATCH 028/176] fix: containers/pfsetacls/Dockerfile to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-DEBIAN12-EXPAT-7855502 - https://snyk.io/vuln/SNYK-DEBIAN12-EXPAT-7855503 - https://snyk.io/vuln/SNYK-DEBIAN12-EXPAT-7855507 - https://snyk.io/vuln/SNYK-DEBIAN12-GIT-6846203 - https://snyk.io/vuln/SNYK-DEBIAN12-GIT-6846203 --- containers/pfsetacls/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/pfsetacls/Dockerfile b/containers/pfsetacls/Dockerfile index 49a536bd0bd1..48021c43bed8 100644 --- a/containers/pfsetacls/Dockerfile +++ b/containers/pfsetacls/Dockerfile @@ -1,5 +1,5 @@ -FROM golang:1.23.0-bookworm +FROM golang:1.23.1-bookworm ENV SEMAPHORE_VERSION="development" SEMAPHORE_ARCH="linux_amd64" \ SEMAPHORE_CONFIG_PATH="${SEMAPHORE_CONFIG_PATH:-/etc/semaphore}" \ From 251a58c44e21c2d69e2884eb6e5632642f907015 Mon Sep 17 00:00:00 2001 From: Satkunas <3904468+satkunas@users.noreply.github.com> Date: Mon, 30 Sep 2024 10:17:54 -0400 Subject: [PATCH 029/176] fix: go/chisel/Dockerfile to reduce vulnerabilities (#8287) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6913413 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6928845 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6928846 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6928846 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6928847 Co-authored-by: snyk-bot --- go/chisel/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/chisel/Dockerfile b/go/chisel/Dockerfile index 883e4680cb15..3155bd9d6da8 100644 --- a/go/chisel/Dockerfile +++ b/go/chisel/Dockerfile @@ -10,7 +10,7 @@ RUN go build \ -ldflags "-X 
github.com/inverse-inc/packetfence/go/chisel/share.BuildVersion=$(git describe --abbrev=0 --tags)" \ -o chisel # container stage -FROM alpine:3.19.1 +FROM alpine:3.19.4 RUN apk update && apk add --no-cache ca-certificates WORKDIR /app COPY --from=build-env /src/chisel /app/chisel From 075b85e40230066a8763bd8051923571f265b997 Mon Sep 17 00:00:00 2001 From: Satkunas <3904468+satkunas@users.noreply.github.com> Date: Mon, 30 Sep 2024 10:18:15 -0400 Subject: [PATCH 030/176] fix: containers/fingerbank-db/Dockerfile to reduce vulnerabilities (#8286) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6913413 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6913413 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6928845 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6928846 - https://snyk.io/vuln/SNYK-ALPINE319-BUSYBOX-6928847 Co-authored-by: snyk-bot --- containers/fingerbank-db/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/fingerbank-db/Dockerfile b/containers/fingerbank-db/Dockerfile index 738cd1864663..b9220728cc10 100644 --- a/containers/fingerbank-db/Dockerfile +++ b/containers/fingerbank-db/Dockerfile @@ -4,7 +4,7 @@ FROM ${KNK_REGISTRY_URL}/pfdebian:${IMAGE_TAG} RUN apt update && apt install fingerbank --reinstall -FROM alpine:3.19.1 +FROM alpine:3.19.4 ARG FINGERBANK_BUILD_API_KEY From 8807217cbe6974e67130485f20b249860507b0da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Mon, 30 Sep 2024 14:49:43 +0000 Subject: [PATCH 031/176] changed role for ArubaOS_CX_10_x.pm --- lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm | 11 + lib/pf/Switch/Aruba/CX.pm | 278 ------------------------- 2 files changed, 11 insertions(+), 278 deletions(-) delete mode 100644 lib/pf/Switch/Aruba/CX.pm diff --git a/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm b/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm index 16af81524495..a8e1579fbc81 100644 --- 
a/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm +++ b/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm @@ -418,6 +418,17 @@ sub acl_chewer { return $acl_chewed; } +=item returnRoleAttribute + +What RADIUS Attribute (usually VSA) should the role returned into. + +=cut + +sub returnRoleAttribute { + my ($self) = @_; + + return 'Aruba-User-Role'; +} =back diff --git a/lib/pf/Switch/Aruba/CX.pm b/lib/pf/Switch/Aruba/CX.pm deleted file mode 100644 index 2da19c7ec963..000000000000 --- a/lib/pf/Switch/Aruba/CX.pm +++ /dev/null @@ -1,278 +0,0 @@ -package pf::Switch::Aruba::CX; - -=head1 NAME - -pf::Switch::Aruba::CX - -=head1 SYNOPSIS - -Module to manage rebanded Aruba HP CX Switch - -=head1 STATUS - -=over - -=item Supports - -=over - -=item MAC-Authentication - -=item 802.1X - -=item Radius downloadable ACL support - -=item Voice over IP - -=item Radius CLI Login - -=back - -=back - -Has been reported to work on Aruba CX - -=cut - -use strict; -use warnings; -use Net::SNMP; - -use base ('pf::Switch::Aruba::5400'); - -use pf::constants; -use pf::config qw( - $MAC - $PORT - $WIRED_802_1X - $WIRED_MAC_AUTH -); - -use pf::Switch::constants; -use pf::util; -use pf::util::radius qw(perform_disconnect perform_coa); -use Try::Tiny; -use pf::locationlog; - -sub description { 'Aruba CX Switch' } - -# CAPABILITIES -# access technology supported -# VoIP technology supported -use pf::SwitchSupports qw( - PushACLs -); - -sub radiusDisconnect { - my ($self, $mac, $add_attributes_ref) = @_; - my $logger = $self->logger; - - # initialize - $add_attributes_ref = {} if (!defined($add_attributes_ref)); - - if (!defined($self->{'_radiusSecret'})) { - $logger->warn( - "Unable to perform RADIUS CoA-Request on $self->{'_ip'}: RADIUS Shared Secret not configured" - ); - return; - } - # Where should we send the RADIUS CoA-Request? 
- # to network device by default - my $nas_port = $self->{'_disconnectPort'} || '3799'; - my $send_disconnect_to = $self->{'_ip'}; - # but if controllerIp is set, we send there - if (defined($self->{'_controllerIp'}) && $self->{'_controllerIp'} ne '') { - $logger->info("controllerIp is set, we will use controller $self->{_controllerIp} to perform deauth"); - $send_disconnect_to = $self->{'_controllerIp'}; - } - # allowing client code to override where we connect with NAS-IP-Address - $send_disconnect_to = $add_attributes_ref->{'NAS-IP-Address'} - if (defined($add_attributes_ref->{'NAS-IP-Address'})); - - my $response; - try { - my $connection_info = $self->radius_deauth_connection_info($send_disconnect_to); - $connection_info->{nas_port} = $nas_port; - my $locationlog = locationlog_view_open_mac($mac); - $logger->debug("network device supports roles. Evaluating role to be returned"); - my $roleResolver = pf::roles::custom->instance(); - my $role = $roleResolver->getRoleForNode($mac, $self); - - # transforming MAC to the expected format 00112233CAFE - my $calling_station_id = uc($mac); - $calling_station_id =~ s/:/-/g; - $mac = lc($mac); - $mac =~ s/://g; - - # Standard Attributes - my $attributes_ref = { - 'User-Name' => $mac, - 'NAS-IP-Address' => $send_disconnect_to, - 'Calling-Station-Id' => $calling_station_id, - 'NAS-Port' => $locationlog->{port}, - }; - # merging additional attributes provided by caller to the standard attributes - $attributes_ref = { %$attributes_ref, %$add_attributes_ref }; - - if ( $self->shouldUseCoA({role => $role}) ) { - - $attributes_ref = { - %$attributes_ref, - 'Filter-Id' => $role, - }; - $logger->info("[$self->{'_ip'}] Returning ACCEPT with role: $role"); - $response = perform_coa($connection_info, $attributes_ref); - - } - else { - $response = perform_disconnect($connection_info, $attributes_ref); - } - } catch { - chomp; - $logger->warn("Unable to perform RADIUS CoA-Request: $_"); - $logger->error("Wrong RADIUS secret or 
unreachable network device...") if ($_ =~ /^Timeout/); - }; - return if (!defined($response)); - - return $TRUE if ( ($response->{'Code'} eq 'Disconnect-ACK') || ($response->{'Code'} eq 'CoA-ACK') ); - - $logger->warn( - "Unable to perform RADIUS Disconnect-Request." - . ( defined($response->{'Code'}) ? " $response->{'Code'}" : 'no RADIUS code' ) . ' received' - . ( defined($response->{'Error-Cause'}) ? " with Error-Cause: $response->{'Error-Cause'}." : '' ) - ); - return; -} - -sub wiredeauthTechniques { - my ($self, $method, $connection_type) = @_; - my $logger = $self->logger; - if ($connection_type == $WIRED_802_1X) { - my $default = $SNMP::SNMP; - my %tech = ( - $SNMP::SNMP => 'dot1xPortReauthenticate', - $SNMP::RADIUS => 'deauthenticateMacRadius', - ); - - if (!defined($method) || !defined($tech{$method})) { - $method = $default; - } - return $method,$tech{$method}; - } - if ($connection_type == $WIRED_MAC_AUTH) { - my $default = $SNMP::SNMP; - my %tech = ( - $SNMP::SNMP => 'handleReAssignVlanTrapForWiredMacAuth', - $SNMP::RADIUS => 'deauthenticateMacRadius', - ); - - if (!defined($method) || !defined($tech{$method})) { - $method = $default; - } - return $method,$tech{$method}; - } -} - -=head2 deauthenticateMacRadius - -Method to deauth a wired node with CoA. - -=cut - -sub deauthenticateMacRadius { - my ($self, $ifIndex,$mac) = @_; - my $logger = $self->logger; - - - # perform CoA - $self->radiusDisconnect($mac); -} - -=head2 acl_chewer - -Format ACL to match with the expected switch format. 
- -=cut - -sub acl_chewer { - my ($self, $acl, $role) = @_; - my $logger = $self->logger; - my ($acl_ref , @direction) = $self->format_acl($acl); - - my $i = 0; - my $acl_chewed; - foreach my $acl (@{$acl_ref->{'packetfence'}->{'entries'}}) { - #Bypass acl that contain tcp_flag, it doesnt apply correctly on the switch - next if (defined($acl->{'tcp_flags'})); - $acl->{'protocol'} =~ s/\(\d*\)//; - my $dest; - my $dest_port; - if (defined($acl->{'destination'}->{'port'})) { - $dest_port = $acl->{'destination'}->{'port'}; - $dest_port =~ s/\w+\s+//; - } - if ($acl->{'destination'}->{'ipv4_addr'} eq '0.0.0.0') { - $dest = "any"; - } elsif($acl->{'destination'}->{'ipv4_addr'} ne '0.0.0.0') { - if ($acl->{'destination'}->{'wildcard'} ne '0.0.0.0') { - $dest = $acl->{'destination'}->{'ipv4_addr'}."/".norm_net_mask($acl->{'destination'}->{'wildcard'}); - } else { - $dest = $acl->{'destination'}->{'ipv4_addr'}; - } - } - my $src; - if ($acl->{'source'}->{'ipv4_addr'} eq '0.0.0.0') { - $src = "any"; - } elsif($acl->{'source'}->{'ipv4_addr'} ne '0.0.0.0') { - if ($acl->{'source'}->{'wildcard'} ne '0.0.0.0') { - $src = $acl->{'source'}->{'ipv4_addr'}."/".norm_net_mask($acl->{'source'}->{'wildcard'}); - } else { - $src = $acl->{'source'}->{'ipv4_addr'}; - } - } - my $j = $i + 1; - if ($self->usePushACLs && (whowasi() eq "pf::Switch::getRoleAccessListByName")) { - $acl_chewed .= ((defined($direction[$i]) && $direction[$i] ne "") ? $direction[$i]."|" : "").$j." ".$acl->{'action'}." ".$acl->{'protocol'}." ".(($self->usePushACLs) ? $src : "any")." $dest " . ( defined($acl->{'destination'}->{'port'}) ? $acl->{'destination'}->{'port'} : '' )."\n"; - } else { - $acl_chewed .= ((defined($direction[$i]) && $direction[$i] ne "") ? $direction[$i]."|" : "").$acl->{'action'}." ".((defined($direction[$i]) && $direction[$i] ne "") ? $direction[$i] : "in")." ".$acl->{'protocol'}." from any to ".$dest." ".( defined($dest_port) ? 
$dest_port : '' )."\n"; - } - $i++; - } - return $acl_chewed; -} - - -=back - -=head1 AUTHOR - -Inverse inc. - -=head1 COPYRIGHT - -Copyright (C) 2005-2024 Inverse inc. - -=head1 LICENSE - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -USA. - -=cut - -1; - -# vim: set shiftwidth=4: -# vim: set expandtab: -# vim: set backspace=indent,eol,start: From 2b3c7a92da3afc72c7328ac2dab2cb35e18a4983 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Mon, 30 Sep 2024 17:12:25 -0400 Subject: [PATCH 032/176] Fixes test --- lib/pf/cidr_role.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pf/cidr_role.pm b/lib/pf/cidr_role.pm index 97c3a6b9db02..b1194dde8dab 100644 --- a/lib/pf/cidr_role.pm +++ b/lib/pf/cidr_role.pm @@ -69,7 +69,7 @@ sub update_switch_role_with_mask { my $switch = pf::SwitchFactory->instantiate({ switch_mac => $locationlog->{'switch_mac'}, switch_ip => $locationlog->{'switch_ip'}, switch_id => $locationlog->{'switch'}}); return undef unless ($switch); return undef unless (pf::util::isenabled($switch->{'_NetworkMap'})); - return undef unless $switch->{"_".$locationlog->{'role'}."NetworkFrom"} ne "dynamic"; + return undef unless $switch->{"_".$locationlog->{'role'}."NetworkFrom"} eq "dynamic"; my $networks = $switch->cache_distributed->get($locationlog->{'switch'}.".".$locationlog->{'role'}); From 
f54a3fe9f46153989b5922c2b5e466bc79c3469e Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Tue, 1 Oct 2024 00:55:05 +0000 Subject: [PATCH 033/176] Add test for lookup.LookupByRoles --- go/cron/network_event.go | 15 +++++++--- go/cron/policy_lookup.go | 55 ++++++++++++++++++++++++++--------- go/cron/policy_lookup_test.go | 52 +++++++++++++++++++++++++++++++++ 3 files changed, 105 insertions(+), 17 deletions(-) diff --git a/go/cron/network_event.go b/go/cron/network_event.go index 5e3d498b0cc7..166609fd65dc 100644 --- a/go/cron/network_event.go +++ b/go/cron/network_event.go @@ -195,16 +195,23 @@ type NetworkTranslationInfo struct { } func (ne *NetworkEvent) GetSrcRole(ctx context.Context, db *sql.DB) (string, string) { - src := ne.SourceInventoryItem - if src == nil { + return ne.getRoleFromInventory(ctx, db, ne.SourceInventoryItem) +} + +func (ne *NetworkEvent) GetDstRole(ctx context.Context, db *sql.DB) (string, string) { + return ne.getRoleFromInventory(ctx, db, ne.DestInventoryitem) +} + +func (ne *NetworkEvent) getRoleFromInventory(ctx context.Context, db *sql.DB, item *InventoryItem) (string, string) { + if item == nil { return "", "" } - if len(src.ExternalIDS) == 0 { + if len(item.ExternalIDS) == 0 { return "", "" } - mac := src.ExternalIDS[0] + mac := item.ExternalIDS[0] if mac == "" || mac == "00:00:00:00:00:00" { return "", "" } diff --git a/go/cron/policy_lookup.go b/go/cron/policy_lookup.go index 3199c19c4bab..7b751a33993d 100644 --- a/go/cron/policy_lookup.go +++ b/go/cron/policy_lookup.go @@ -247,26 +247,55 @@ type PolicyLookup struct { } func (l PolicyLookup) Lookup(ctx context.Context, db *sql.DB, ne *NetworkEvent) *EnforcementInfo { - mac, role := ne.GetSrcRole(ctx, db) - if mac == "" || role == "" { + srcMac, srcRole := ne.GetSrcRole(ctx, db) + dstMac, dstRole := ne.GetDstRole(ctx, db) + if ei := l.LookupByMac(srcMac, ne); ei != nil { + return ei + } + + if ei := l.LookupByMac(dstMac, ne); ei != nil { + return ei + } + + if ei := 
l.LookupByRoles(srcRole, ne); ei != nil { + return ei + } + + if ei := l.LookupByRoles(dstRole, ne); ei != nil { + return ei + } + + if srcMac != "" { + return l.LookupImplict(ne) + } + + return nil +} + +func (l *PolicyLookup) LookupByRoles(role string, ne *NetworkEvent) *EnforcementInfo { + policies, ok := l.ByRoles[role] + if !ok { return nil } - if policies, ok := l.NodesPolicies[mac]; ok { - ei := matchEnforcementInfo(policies, ne) - if ei != nil { - return ei - } + if ei := matchEnforcementInfo(policies, ne); ei != nil { + return ei } - if policies, ok := l.ByRoles[role]; ok { - ei := matchEnforcementInfo(policies, ne) - if ei != nil { - return ei - } + return nil +} + +func (l *PolicyLookup) LookupByMac(mac string, ne *NetworkEvent) *EnforcementInfo { + policies, ok := l.NodesPolicies[mac] + if !ok { + return nil } - return l.LookupImplict(ne) + if ei := matchEnforcementInfo(policies, ne); ei != nil { + return ei + } + + return nil } func matchEnforcementInfo(policies []Policy, ne *NetworkEvent) *EnforcementInfo { diff --git a/go/cron/policy_lookup_test.go b/go/cron/policy_lookup_test.go index 5855946fcb7c..d6213d430667 100644 --- a/go/cron/policy_lookup_test.go +++ b/go/cron/policy_lookup_test.go @@ -562,6 +562,36 @@ const RolesPoliciesMapJSON = ` "permit tcp any 10.15.1.0 0.0.0.255 eq 3389", "permit udp any 10.15.1.0 0.0.0.255 eq 3389" ] + }, + { + "enforcement_info": [ + { + "policy-revision": 3, + "verdict": "allow", + "dc-inventory-revision": 1725462233, + "rule-id": "28477cf7-234e-4751-8ced-542464017b1c/" + } + ], + "acls": [ + "permit tcp any 10.15.1.0 0.0.0.255 eq 3389", + "permit udp any 10.15.1.0 0.0.0.255 eq 3389" + ] + }, + { + "enforcement_info": [ + { + "policy-revision": 66, + "verdict": "allow", + "dc-inventory-revision": 1727715416, + "rule-id": "d2cdcbd9-5acd-4021-ba96-fdecbbf77473/" + } + ], + "acls": [ + "#permit tcp any host 00:50:56:9d:44:ca eq 222", + "#permit udp any host 00:50:56:9d:44:ca eq 222", + "#permit tcp any host 
00:50:56:9d:44:ca eq 333", + "#permit udp any host 00:50:56:9d:44:ca eq 333" + ] } ] }, @@ -608,4 +638,26 @@ func TestPolicyLoad(t *testing.T) { } lookup.UpdateMatchers() + ne := NetworkEvent{ + DestPort: 222, + SourceIp: netip.AddrFrom4([4]byte{10, 0, 0, 1}), + DestIp: netip.AddrFrom4([4]byte{10, 0, 0, 3}), + IpProtocol: IpProtocolUdp, + DestInventoryitem: &InventoryItem{ + ExternalIDS: []string{"00:50:56:9d:44:ca"}, + }, + } + + if diff := cmp.Diff( + lookup.LookupByRoles("IoT-Lighting", &ne), + &EnforcementInfo{ + RuleID: "d2cdcbd9-5acd-4021-ba96-fdecbbf77473/", + Verdict: "allow", + PolicyRevision: 66, + DcInventoryRevision: 1727715416, + }, + ); diff != "" { + t.Fatalf("LookupByRoles does not match %s", diff) + } + } From 24567a4a2732e4a0d13ec3c1ba07156723c31752 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 1 Oct 2024 15:46:39 -0400 Subject: [PATCH 034/176] Move proxysql from pfbuild to pfdebian --- containers/proxysql/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/proxysql/Dockerfile b/containers/proxysql/Dockerfile index c8fddbb21abb..2e5306f2b396 100644 --- a/containers/proxysql/Dockerfile +++ b/containers/proxysql/Dockerfile @@ -1,6 +1,6 @@ ARG KNK_REGISTRY_URL ARG IMAGE_TAG -FROM ${KNK_REGISTRY_URL}/pfbuild-debian-bookworm:${IMAGE_TAG} +FROM ${KNK_REGISTRY_URL}/pfdebian:${IMAGE_TAG} RUN apt-get -qq update \ && apt-get clean \ From 85ee0b51396428c127fb3da8b247f14856f5dc23 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 2 Oct 2024 09:52:46 -0400 Subject: [PATCH 035/176] Added missing expires_jitter in jamfCloud provisioner --- lib/pf/provisioner/jamfCloud.pm | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/pf/provisioner/jamfCloud.pm b/lib/pf/provisioner/jamfCloud.pm index 09a928191343..557e4443623a 100644 --- a/lib/pf/provisioner/jamfCloud.pm +++ b/lib/pf/provisioner/jamfCloud.pm @@ -114,6 +114,13 @@ Option to sync PID from provisioner has sync_pid => 
(is => 'rw', required => 1); +=head2 + +expires_jitter + +=cut + +has expires_jitter => (is => 'rw', default => sub { 10 } ); =head1 Methods From 5166732ac48230368107f951dd92b6077e76ade6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Thu, 3 Oct 2024 09:45:36 +0000 Subject: [PATCH 036/176] created migration script for CX --- .../upgrade/to-14.1-convert-switch-types.pl | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100755 addons/upgrade/to-14.1-convert-switch-types.pl diff --git a/addons/upgrade/to-14.1-convert-switch-types.pl b/addons/upgrade/to-14.1-convert-switch-types.pl new file mode 100755 index 000000000000..743d38814c41 --- /dev/null +++ b/addons/upgrade/to-14.1-convert-switch-types.pl @@ -0,0 +1,82 @@ +#!/usr/bin/perl + +=head1 NAME + +to-14.1-convert-switch-types.pl + +=head1 DESCRIPTION + +Convert some switch types and use Switch OS versions + +=cut + +use strict; +use warnings; +use lib qw(/usr/local/pf/lib); +use lib qw(/usr/local/pf/lib_perl/lib/perl5); +use pf::util qw(run_as_pf); +use pf::IniFiles; +use pf::file_paths qw( + $switches_config_file +); +use File::Copy; + +run_as_pf(); + +my $file = $switches_config_file; + +if (@ARGV) { + $file = $ARGV[0]; +} + +our %types = ( + 'Aruba::CX' => 'Aruba::ArubaOS_CX_10_x', +); + +my $cs = pf::IniFiles->new(-file => $file, -allowempty => 1); + +my $update = 0; +for my $section ($cs->Sections()) { + my $type = $cs->val($section, 'type'); + next if !defined $type || !exists $types{$type}; + my $new_type = $types{$type}; + $cs->setval($section, 'type', $new_type); + $update |= 1; +} + +if ($update) { + $cs->RewriteConfig(); + print "All done\n"; + exit 0; +} + + +print "Nothing to be done\n"; + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc.
+ +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut + From 341b0d65f1db9ff36a946b086acb1374f4258d2d Mon Sep 17 00:00:00 2001 From: Zhihao Ma Date: Mon, 7 Oct 2024 10:58:55 -0400 Subject: [PATCH 037/176] enable MD4 support for python / impacket fixes #8301 --- containers/pfperl-api/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/containers/pfperl-api/Dockerfile b/containers/pfperl-api/Dockerfile index 05c0e46a7ec4..040e52e9cf3c 100644 --- a/containers/pfperl-api/Dockerfile +++ b/containers/pfperl-api/Dockerfile @@ -7,6 +7,7 @@ COPY ./lib ./lib COPY ./db ./db COPY ./sbin/pfperl-api ./sbin/pfperl-api COPY bin/pfcmd.pl bin/pfcmd.pl +COPY addons/ntlm-auth-api/openssl.cnf /etc/ssl/openssl.cnf RUN ln -s /usr/local/pf/bin/pfcmd.pl /usr/local/pf/bin/pfcmd RUN ln -s /usr/share/doc/python3-impacket/examples/addcomputer.py /usr/local/pf/bin/impacket-addcomputer RUN mkdir -p /usr/local/pf/var/run/ && chmod 0744 /usr/local/pf/var/run/ From b360f162433e7e7e89ddf9c2faef5fbaa5e77264 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Tue, 8 Oct 2024 13:53:58 +0000 Subject: [PATCH 038/176] Skip sflow --- go/cron/pfflowjob.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/go/cron/pfflowjob.go b/go/cron/pfflowjob.go index 6a065baf20d5..cbf74a2deedb 100644 --- a/go/cron/pfflowjob.go +++ b/go/cron/pfflowjob.go 
@@ -82,6 +82,10 @@ func (j *PfFlowJob) Run() { continue } + if pfFlows.Header.FlowType == 65535 { + continue + } + ChanPfFlow <- []*PfFlows{pfFlows} } } From 5f8c8f27e62106ae1d179945611d2363279664a0 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Tue, 8 Oct 2024 13:54:56 +0000 Subject: [PATCH 039/176] Update config example --- conf/kafka.conf.example | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/conf/kafka.conf.example b/conf/kafka.conf.example index 9c42e43c6379..1318518954d9 100644 --- a/conf/kafka.conf.example +++ b/conf/kafka.conf.example @@ -12,10 +12,12 @@ #KAFKA_CONTROLLER_QUORUM_VOTERS=1@172.16.3.1:9093,2@172.16.3.2:9093,3@172.16.3.3:9093 #KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL #KAFKA_LISTENERS=INTERNAL://0.0.0.0:29092,CONTROLLER://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092 -#KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT +#KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:SASL_PLAINTEXT #KAFKA_LOG_DIRS=/usr/local/pf/var/kafka #KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=2 #KAFKA_PROCESS_ROLES=broker,controller +#KAFKA_OPTS=-Djava.security.auth.login.config=/usr/local/pf/conf/kafka/kafka_server_jaas.conf +#KAFKA_SASL_ENABLED_MECHANISMS=PLAIN ## #### Cluster members ENV Variables with the hostname as the section name ## From c10ed5535b81fcbf070599bd8aee8c11c9b9eb79 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 8 Oct 2024 10:32:28 -0400 Subject: [PATCH 040/176] fix/8312: Add CLI for upgrade on EL8 in documentation (#8333) * Add CLI for upgrade on EL8 * Add CLI for upgrade on EL8 for cluster --- docs/PacketFence_Upgrade_Guide.asciidoc | 28 +++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/docs/PacketFence_Upgrade_Guide.asciidoc b/docs/PacketFence_Upgrade_Guide.asciidoc index 9e8a5d8f30d6..dfe276dd6e3a 100644 --- a/docs/PacketFence_Upgrade_Guide.asciidoc +++ 
b/docs/PacketFence_Upgrade_Guide.asciidoc @@ -1638,6 +1638,34 @@ to do. Your export archive will contain your previous == Upgrading from a version prior to X.Y.Z +=== Upgrade Standalone RedHat EL8 + +Please follow these command lines in order to upgrade database. + +---- +yum clean all --enablerepo=packetfence +yum update --enablerepo=packetfence +systemctl stop monit +systemctl disable monit +/usr/local/pf/bin/pfcmd service pf stop +systemctl stop packetfence-mariadb +rpm -e --nodeps MariaDB-server +rpm -e --nodeps MariaDB-client +yum localinstall -y https://www.packetfence.org/downloads/PacketFence/RHEL8/14.1/x86_64/RPMS/MariaDB-client-10.11.6-1.el8.x86_64.rpm +yum localinstall -y https://www.packetfence.org/downloads/PacketFence/RHEL8/14.1/x86_64/RPMS/galera-4-26.4.16-1.el8.x86_64.rpm +yum localinstall -y https://www.packetfence.org/downloads/PacketFence/RHEL8/14.1/x86_64/RPMS/MariaDB-server-10.11.6-1.el8.x86_64.rpm +yum localinstall -y https://www.packetfence.org/downloads/PacketFence/RHEL8/14.1/x86_64/RPMS/freeradius-mysql-3.2.6-1.el8.x86_64.rpm +systemctl start packetfence-mariadb +mysql_upgrade -p +addons/upgrade/do-upgrade.sh +---- + +=== Upgrade Cluster RedHat EL8 + +It is the same as <> +but when you are at that this step <> for node C, +please follow these upgrade instructions <>. 
+ == Archived upgrade notes include::upgrade-notes/archived_upgrade_notes.asciidoc[] From 788b389d42616d09de9beff27376caae4000e701 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 9 Oct 2024 09:40:10 -0400 Subject: [PATCH 041/176] Update 14.0 upgrade and keep it simple --- docs/PacketFence_Upgrade_Guide.asciidoc | 74 +++++-------------------- 1 file changed, 15 insertions(+), 59 deletions(-) diff --git a/docs/PacketFence_Upgrade_Guide.asciidoc b/docs/PacketFence_Upgrade_Guide.asciidoc index dfe276dd6e3a..1dcc2328bcec 100644 --- a/docs/PacketFence_Upgrade_Guide.asciidoc +++ b/docs/PacketFence_Upgrade_Guide.asciidoc @@ -1509,52 +1509,8 @@ Before continuing, be sure to read <> procedure. - - -===== On RedHat EL8 from PacketFence version 11.1 to 13.2 - -====== Backup database +===== Backup database locally Backup using the following script where the database export is created using mariadb-backup (10.5). This backup is used to Import the database in the new host. @@ -1565,7 +1521,7 @@ Backup using the following script where the database export is created using mar Ensure the backup exists in /root/backup/packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream.gz -====== Export the configuration +===== Prepare the configuration for exportation This export is only used to Import the configuration files in the new host. @@ -1574,31 +1530,31 @@ This export is only used to Import the configuration files in the new host. ---- -====== Prepare backup on Debian 11 +===== Prepare the database on Debian 11 or EL8 -Restore the database backup into a new copy for mariabackup. +Restore locally the database backup into a new copy for mariabackup. 
---- -gunzip packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream.gz +gunzip /root/backup/packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream.gz mkdir -p /root/backup/restore/ pushd /root/backup/restore/ -mv packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream /root/backup/restore/ -cd /root/backup/restore/ -mbstream -x < packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream -rm packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream +mv /root/backup/packetfence-db-dump-innobackup-YYYY-MM-DD_HHhmm.xbstream /root/backup/restore/ +mbstream -x < packetfence-db-dump-innobackup-*.xbstream +rm packetfence-db-dump-innobackup-*.xbstream mariabackup --prepare --target-dir=./ ---- - => SCP (copy) the restored files to the Debian 12 server +=> SCP (copy) the restored files and the export.tgz to the Debian 12 server ---- # create the restore directory -ssh root@PACKTFENCE mkdir -p /root/backup/restore/ -scp /root/backup/restore/* root@DEBIAN_12_IP:/root/backup/restore/ +ssh root@PacketFence_Debian_12 mkdir -p /root/backup/restore/ +scp -r /root/backup/restore/* root@PacketFence_Debian_12:/root/backup/restore/ +scp /tmp/export.tgz root@PacketFence_Debian_12:/tmp/export.tgz ---- -====== Import the backup on Debian 12 +===== Import the database on Debian 12 ---- systemctl stop packetfence-mariadb @@ -1614,7 +1570,7 @@ systemctl restart packetfence-mariadb ---- -====== Import the configuration files +===== Import the configuration files on Debian 12 Import only the configuration files, do not import the database. @@ -1627,7 +1583,7 @@ The configuration and database is now migrated to the new host. If all goes well, you can restart services using <>. -====== Additional steps to build or rebuild a cluster +===== Additional steps to build or rebuild a cluster If you want to build or rebuild a cluster, you need to follow instructions in <>. 
From 3dd21354c953831cd3ca244b52e1d4ed5fb981ed Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 9 Oct 2024 10:09:29 -0400 Subject: [PATCH 042/176] Added debug messages for SAML IDP response --- lib/pf/Authentication/Source/SAMLSource.pm | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/pf/Authentication/Source/SAMLSource.pm b/lib/pf/Authentication/Source/SAMLSource.pm index 02489e6afaa5..1c2b3411d746 100644 --- a/lib/pf/Authentication/Source/SAMLSource.pm +++ b/lib/pf/Authentication/Source/SAMLSource.pm @@ -18,6 +18,7 @@ use Template::AutoFilter; use File::Slurp qw(read_file write_file); use File::Temp qw(tempfile); use pf::util; +use pf::log; use Moose; extends 'pf::Authentication::Source'; @@ -182,6 +183,12 @@ sub handle_response { my @attribute_list = $assertion->AttributeStatement->Attribute; my $username; + # For debug + foreach my $attribute (@attribute_list){ + get_logger->debug($attribute->Name); + get_logger->debug($attribute->AttributeValue->any->content); + } + foreach my $attribute (@attribute_list){ if($attribute->Name eq $self->username_attribute){ $username = $attribute->AttributeValue->any->content; From 93d3bc441724c723827abaf30ecb2818eb587999 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 9 Oct 2024 10:34:37 -0400 Subject: [PATCH 043/176] Move and rename default Install grub menu in advanced menu for ISO image --- ci/debian-installer/grub.cfg | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ci/debian-installer/grub.cfg b/ci/debian-installer/grub.cfg index 3e402adcae40..084ea4d6a2b7 100644 --- a/ci/debian-installer/grub.cfg +++ b/ci/debian-installer/grub.cfg @@ -24,16 +24,16 @@ fi insmod play play 960 440 1 0 4 440 1 set theme=/boot/grub/theme/1 -menuentry --hotkey=i 'Install' { - set background_color=black - linux /install.amd/vmlinuz vga=788 net.ifnames=0 --- quiet - initrd /install.amd/initrd.gz -} submenu --hotkey=a 'Advanced options ...' 
{ set menu_color_normal=cyan/blue set menu_color_highlight=white/blue set theme=/boot/grub/theme/1-1 set gfxpayload=keep + menuentry '... Install only Debian without PacketFence' { + set background_color=black + linux /install.amd/vmlinuz vga=788 net.ifnames=0 --- quiet + initrd /install.amd/initrd.gz + } menuentry '... Graphical expert install' { set background_color=black linux /install.amd/vmlinuz priority=low vga=788 --- From a9bc4419f37c62909d08d1afebe9c1745b441009 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:40:58 -0400 Subject: [PATCH 044/176] Bump version to debian 12.6 --- ci/debian-installer/create-debian-installer.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/debian-installer/create-debian-installer.sh b/ci/debian-installer/create-debian-installer.sh index 40a503d2c468..d0b55750f601 100755 --- a/ci/debian-installer/create-debian-installer.sh +++ b/ci/debian-installer/create-debian-installer.sh @@ -8,13 +8,13 @@ function clean() { chmod a+rw $ISO_OUT } -ISO_IN=${ISO_IN:-debian-12.4.0-amd64-netinst.iso} +ISO_IN=${ISO_IN:-debian-12.6.0-amd64-netinst.iso} ISO_OUT=${ISO_OUT:-packetfence-debian-installer.iso} trap clean EXIT if ! 
[ -f $ISO_IN ]; then - wget https://cdimage.debian.org/cdimage/archive/12.4.0/amd64/iso-cd/$ISO_IN + wget https://cdimage.debian.org/cdimage/archive/12.6.0/amd64/iso-cd/$ISO_IN fi rm -fr isofiles/ From 845e2e89114c55b3c860b675c34ceabd8f659a71 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:44:07 -0400 Subject: [PATCH 045/176] Change menu title to install PacketFence --- ci/debian-installer/gtk.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/debian-installer/gtk.cfg b/ci/debian-installer/gtk.cfg index 000d9b473d56..5aa37b1a6fd3 100644 --- a/ci/debian-installer/gtk.cfg +++ b/ci/debian-installer/gtk.cfg @@ -1,6 +1,6 @@ default install label install - menu label ^Install PacketFence + menu label ^Install Debian with PacketFence menu default kernel /install.amd/vmlinuz append vga=788 net.ifnames=0 initrd=/install.amd/initrd.gz --- quiet From 00b596a8c65e111994b987d17d236edca1aeca7a Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:52:55 -0400 Subject: [PATCH 046/176] Rename iso if name contains / --- ci/debian-installer/build-and-upload.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/debian-installer/build-and-upload.sh b/ci/debian-installer/build-and-upload.sh index 5ec0425f9e9a..99dbbe2e1a44 100755 --- a/ci/debian-installer/build-and-upload.sh +++ b/ci/debian-installer/build-and-upload.sh @@ -10,6 +10,9 @@ then PF_VERSION+=${BASH_REMATCH[1]}; PF_VERSION+=.0; echo "Maintenance Branch detected, try to match tag version with PF version = $PF_VERSION" +elif [[ "$PF_VERSION" =~ ^.*\/.*$ ]]; +then + PF_VERSION="`echo $PF_VERSION | sed -r 's/\//-/g'`" fi PF_RELEASE="`echo $PF_RELEASE | sed -r 's/.*\b([0-9]+\.[0-9]+)\.[0-9]+/\1/g'`" From 3ba9d736f50d2bba2ade68ee7c2ca0c63a822630 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 9 Oct 2024 15:13:29 -0400 Subject: [PATCH 047/176] Fixes Aruba ACL network mask --- 
lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm b/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm index 16af81524495..45dc34cffb46 100644 --- a/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm +++ b/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm @@ -54,6 +54,7 @@ use pf::util; use pf::util::radius qw(perform_disconnect perform_coa); use Try::Tiny; use pf::locationlog; +use NetAddr::IP; sub description { 'Aruba CX Switch 10.x' } @@ -392,7 +393,9 @@ sub acl_chewer { $dest = "any"; } elsif($acl->{'destination'}->{'ipv4_addr'} ne '0.0.0.0') { if ($acl->{'destination'}->{'wildcard'} ne '0.0.0.0') { - $dest = $acl->{'destination'}->{'ipv4_addr'}."/".norm_net_mask($acl->{'destination'}->{'wildcard'}); + my $net_addr = NetAddr::IP->new($acl->{'destination'}->{'ipv4_addr'}, norm_net_mask($acl->{'destination'}->{'wildcard'})); + my $cidr = $net_addr->cidr(); + $dest = $cidr; } else { $dest = $acl->{'destination'}->{'ipv4_addr'}; } @@ -402,7 +405,9 @@ sub acl_chewer { $src = "any"; } elsif($acl->{'source'}->{'ipv4_addr'} ne '0.0.0.0') { if ($acl->{'source'}->{'wildcard'} ne '0.0.0.0') { - $src = $acl->{'source'}->{'ipv4_addr'}."/".norm_net_mask($acl->{'source'}->{'wildcard'}); + my $net_addr = NetAddr::IP->new($acl->{'source'}->{'ipv4_addr'}, norm_net_mask($acl->{'source'}->{'wildcard'})); + my $cidr = $net_addr->cidr(); + $src = $cidr; } else { $src = $acl->{'source'}->{'ipv4_addr'}; } From f69079e486fe71bd1e2f3cd1236bcddbba2c0947 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 9 Oct 2024 16:02:08 -0400 Subject: [PATCH 048/176] Reduce menu size on boot installation --- ci/debian-installer/grub.cfg | 40 +++++------------------------------- 1 file changed, 5 insertions(+), 35 deletions(-) diff --git a/ci/debian-installer/grub.cfg b/ci/debian-installer/grub.cfg index 084ea4d6a2b7..4373f3240e81 100644 --- a/ci/debian-installer/grub.cfg +++ 
b/ci/debian-installer/grub.cfg @@ -24,65 +24,35 @@ fi insmod play play 960 440 1 0 4 440 1 set theme=/boot/grub/theme/1 +menuentry --hotkey=i 'Install Debian Only' { + set background_color=black + linux /install.amd/vmlinuz vga=788 net.ifnames=0 --- quiet + initrd /install.amd/initrd.gz +} submenu --hotkey=a 'Advanced options ...' { set menu_color_normal=cyan/blue set menu_color_highlight=white/blue set theme=/boot/grub/theme/1-1 set gfxpayload=keep - menuentry '... Install only Debian without PacketFence' { - set background_color=black - linux /install.amd/vmlinuz vga=788 net.ifnames=0 --- quiet - initrd /install.amd/initrd.gz - } - menuentry '... Graphical expert install' { - set background_color=black - linux /install.amd/vmlinuz priority=low vga=788 --- - initrd /install.amd/gtk/initrd.gz - } menuentry '... Graphical rescue mode' { set background_color=black linux /install.amd/vmlinuz vga=788 rescue/enable=true --- quiet initrd /install.amd/gtk/initrd.gz } - menuentry '... Graphical automated install' { - set background_color=black - linux /install.amd/vmlinuz auto=true priority=critical vga=788 --- quiet - initrd /install.amd/gtk/initrd.gz - } - menuentry --hotkey=x '... Expert install' { - set background_color=black - linux /install.amd/vmlinuz priority=low vga=788 --- - initrd /install.amd/initrd.gz - } menuentry --hotkey=r '... Rescue mode' { set background_color=black linux /install.amd/vmlinuz vga=788 rescue/enable=true --- quiet initrd /install.amd/initrd.gz } - menuentry --hotkey=a '... Automated install' { - set background_color=black - linux /install.amd/vmlinuz auto=true priority=critical vga=788 --- quiet - initrd /install.amd/initrd.gz - } submenu --hotkey=s '... Speech-enabled advanced options ...' { set menu_color_normal=cyan/blue set menu_color_highlight=white/blue set theme=/boot/grub/theme/1-1-1 set gfxpayload=keep - menuentry --hotkey=x '... 
Expert speech install' { - set background_color=black - linux /install.amd/vmlinuz priority=low vga=788 speakup.synth=soft --- - initrd /install.amd/gtk/initrd.gz - } menuentry --hotkey=r '... Rescue speech mode' { set background_color=black linux /install.amd/vmlinuz vga=788 rescue/enable=true speakup.synth=soft --- quiet initrd /install.amd/gtk/initrd.gz } - menuentry --hotkey=a '... Automated speech install' { - set background_color=black - linux /install.amd/vmlinuz auto=true priority=critical vga=788 speakup.synth=soft --- quiet - initrd /install.amd/gtk/initrd.gz - } } } From 160c9b7ff99ed046061b62017813e6df876582a3 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Fri, 11 Oct 2024 13:53:54 +0000 Subject: [PATCH 049/176] increase acls in roles --- .../root/src/views/Configuration/roles/_components/TheForm.vue | 1 + 1 file changed, 1 insertion(+) diff --git a/html/pfappserver/root/src/views/Configuration/roles/_components/TheForm.vue b/html/pfappserver/root/src/views/Configuration/roles/_components/TheForm.vue index ce1f272ecf0f..0080c5b7fdee 100644 --- a/html/pfappserver/root/src/views/Configuration/roles/_components/TheForm.vue +++ b/html/pfappserver/root/src/views/Configuration/roles/_components/TheForm.vue @@ -40,6 +40,7 @@ Date: Fri, 11 Oct 2024 07:53:18 -0700 Subject: [PATCH 050/176] Update meraki.asciidoc Minor correction and clearer direction on where the Meraki CA certificate can be downloaded. --- docs/network/networkdevice/meraki.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/network/networkdevice/meraki.asciidoc b/docs/network/networkdevice/meraki.asciidoc index fa7ff22b16dd..1d30b2c603fe 100644 --- a/docs/network/networkdevice/meraki.asciidoc +++ b/docs/network/networkdevice/meraki.asciidoc @@ -101,7 +101,7 @@ The Meraki switch offer configuration for VLAN enforcement only. You will need to access the Meraki dashboard to configure your switch. When you reach it you will need first to create a policy. 
You can create a "MAC authentication bypass" or a "802.1X" policy. Depending if you want to authenticate user via dot1x or MAB. You cannot combine both neither use a fallback mode on the same port, each port with a policy applied will be exclusive to MAB or dot1x. -To access the policy creation go to 'Switch->Access policies' in the Meraki dashboard menu. From there create a new policy, use the example below to create your policy. +To access the policy creation go to 'Switching->Access Policies' in the Meraki dashboard menu. From there create a new policy, use the example below to create your policy. image::Meraki-switch-policies.png[scaledwidth="100%",alt="Access policies"] @@ -117,7 +117,7 @@ Then, in order to enable RADSEC, go in your SSID configuration and under 'RADIUS After saving, check the RADSEC checkbox and save your settings. -Now, on your PacketFence server, you must add the Meraki CA root to the trusted Certificate Authorities of FreeRADIUS when performing RADSEC. You should download the Meraki CA certificate from here http://changeme.com/meraki-root.crt and append it to the content of /usr/local/pf/raddb/certs/ca.pem on your PacketFence server. +Now, on your PacketFence server, you must add the Meraki CA root to the trusted Certificate Authorities of FreeRADIUS when performing RADSEC. You should download the Meraki CA certificate from your Meraki dashboard under Organization->Configure->Certificates and selecting "Download CA" below RadSec AP Certificates. You can then append the content of this certificate to /usr/local/pf/raddb/certs/ca.pem on your PacketFence server. 
Next, restart radiusd to reload the CA certificates using: From bfabc33e52d2af374191a9dd8c6dd54fbf319dba Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Fri, 11 Oct 2024 11:18:43 -0400 Subject: [PATCH 051/176] Change menu on iso --- .../create-debian-installer.sh | 5 ++- ci/debian-installer/drk.cfg | 6 ++++ ci/debian-installer/menu.cfg | 35 +++++++++++++++++++ ci/debian-installer/txt.cfg | 4 +++ 4 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 ci/debian-installer/drk.cfg create mode 100644 ci/debian-installer/menu.cfg create mode 100644 ci/debian-installer/txt.cfg diff --git a/ci/debian-installer/create-debian-installer.sh b/ci/debian-installer/create-debian-installer.sh index d0b55750f601..163a19167b94 100755 --- a/ci/debian-installer/create-debian-installer.sh +++ b/ci/debian-installer/create-debian-installer.sh @@ -32,8 +32,11 @@ chmod -w -R isofiles/install.amd/ chmod a+w isofiles/isolinux/gtk.cfg isofiles/isolinux/drkgtk.cfg isofiles/boot/grub/grub.cfg cp gtk.cfg isofiles/isolinux/gtk.cfg cp gtk.cfg isofiles/isolinux/drkgtk.cfg +cp drk.cfg isofiles/isolinux/drk.cfg +cp menu.cfg isofiles/isolinux/menu.cfg +cp txt.cfg isofiles/isolinux/txt.cfg cp grub.cfg isofiles/boot/grub/grub.cfg -chmod 0444 isofiles/isolinux/gtk.cfg isofiles/isolinux/drkgtk.cfg isofiles/boot/grub/grub.cfg +chmod 0444 isofiles/isolinux/* cp postinst-debian-installer.sh isofiles/ cd isofiles diff --git a/ci/debian-installer/drk.cfg b/ci/debian-installer/drk.cfg new file mode 100644 index 000000000000..abb4a8a2de14 --- /dev/null +++ b/ci/debian-installer/drk.cfg @@ -0,0 +1,6 @@ +label installdark +default installdark + menu label ^Install Debian Only + menu default + kernel /install.amd/vmlinuz + append vga=788 initrd=/install.amd/initrd.gz theme=dark --- quiet diff --git a/ci/debian-installer/menu.cfg b/ci/debian-installer/menu.cfg new file mode 100644 index 000000000000..c07a278e1f01 --- /dev/null +++ b/ci/debian-installer/menu.cfg @@ 
-0,0 +1,35 @@ +menu hshift 4 +menu width 70 + +menu title ^GPacketfence Debian GNU/Linux installer menu (BIOS mode) +include stdmenu.cfg +include gtk.cfg +include txt.cfg +menu begin advanced + menu label ^Advanced options + menu title Advanced options + include stdmenu.cfg + label mainmenu + menu label ^Back.. + menu exit + include rqtxt.cfg +menu end +menu begin dark + menu label Accessible ^dark contrast installer menu + menu title Accessible dark contrast option + include drkmenu.cfg + label mainmenu + menu label ^Back.. + menu exit + include drk.cfg + menu begin advanced + menu label ^Advanced options + menu title Advanced options + include drkmenu.cfg + label mainmenu + menu label ^Back.. + menu exit + include rqdrk.cfg + menu end + include x86drkme.cfg +menu end diff --git a/ci/debian-installer/txt.cfg b/ci/debian-installer/txt.cfg new file mode 100644 index 000000000000..d7388e94a59b --- /dev/null +++ b/ci/debian-installer/txt.cfg @@ -0,0 +1,4 @@ +label install + menu label ^Install Debian Only + kernel /install.amd/vmlinuz + append vga=788 initrd=/install.amd/initrd.gz --- quiet From 65e599a39d076eeb1d823463e19c0508d3dd8556 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Fri, 11 Oct 2024 12:19:14 -0400 Subject: [PATCH 052/176] Juste leave rescue and install packetfence --- ci/debian-installer/create-debian-installer.sh | 4 +--- ci/debian-installer/drk.cfg | 6 ------ ci/debian-installer/drkgtk.cfg | 7 +++++++ ci/debian-installer/menu.cfg | 5 ++--- ci/debian-installer/txt.cfg | 4 ---- 5 files changed, 10 insertions(+), 16 deletions(-) delete mode 100644 ci/debian-installer/drk.cfg create mode 100644 ci/debian-installer/drkgtk.cfg delete mode 100644 ci/debian-installer/txt.cfg diff --git a/ci/debian-installer/create-debian-installer.sh b/ci/debian-installer/create-debian-installer.sh index 163a19167b94..44ba251da154 100755 --- a/ci/debian-installer/create-debian-installer.sh +++ 
b/ci/debian-installer/create-debian-installer.sh @@ -31,10 +31,8 @@ chmod -w -R isofiles/install.amd/ chmod a+w isofiles/isolinux/gtk.cfg isofiles/isolinux/drkgtk.cfg isofiles/boot/grub/grub.cfg cp gtk.cfg isofiles/isolinux/gtk.cfg -cp gtk.cfg isofiles/isolinux/drkgtk.cfg -cp drk.cfg isofiles/isolinux/drk.cfg +cp drkgtk.cfg isofiles/isolinux/drkgtk.cfg cp menu.cfg isofiles/isolinux/menu.cfg -cp txt.cfg isofiles/isolinux/txt.cfg cp grub.cfg isofiles/boot/grub/grub.cfg chmod 0444 isofiles/isolinux/* diff --git a/ci/debian-installer/drk.cfg b/ci/debian-installer/drk.cfg deleted file mode 100644 index abb4a8a2de14..000000000000 --- a/ci/debian-installer/drk.cfg +++ /dev/null @@ -1,6 +0,0 @@ -label installdark -default installdark - menu label ^Install Debian Only - menu default - kernel /install.amd/vmlinuz - append vga=788 initrd=/install.amd/initrd.gz theme=dark --- quiet diff --git a/ci/debian-installer/drkgtk.cfg b/ci/debian-installer/drkgtk.cfg new file mode 100644 index 000000000000..26b462e4caaa --- /dev/null +++ b/ci/debian-installer/drkgtk.cfg @@ -0,0 +1,7 @@ +default installdark +label installdark + menu label ^Install Debian with PacketFence + menu default + kernel /install.amd/vmlinuz + append vga=788 net.ifnames=0 initrd=/install.amd/initrd.gz --- quiet + diff --git a/ci/debian-installer/menu.cfg b/ci/debian-installer/menu.cfg index c07a278e1f01..387a46480f4c 100644 --- a/ci/debian-installer/menu.cfg +++ b/ci/debian-installer/menu.cfg @@ -1,10 +1,9 @@ menu hshift 4 menu width 70 -menu title ^GPacketfence Debian GNU/Linux installer menu (BIOS mode) +menu title Packetfence Debian GNU/Linux installer menu (BIOS mode) include stdmenu.cfg include gtk.cfg -include txt.cfg menu begin advanced menu label ^Advanced options menu title Advanced options @@ -21,7 +20,7 @@ menu begin dark label mainmenu menu label ^Back.. 
menu exit - include drk.cfg + include drkgtk.cfg menu begin advanced menu label ^Advanced options menu title Advanced options diff --git a/ci/debian-installer/txt.cfg b/ci/debian-installer/txt.cfg deleted file mode 100644 index d7388e94a59b..000000000000 --- a/ci/debian-installer/txt.cfg +++ /dev/null @@ -1,4 +0,0 @@ -label install - menu label ^Install Debian Only - kernel /install.amd/vmlinuz - append vga=788 initrd=/install.amd/initrd.gz --- quiet From 416bf90f4364ffb194f3740792b7ee492811d5ac Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 09:04:12 -0400 Subject: [PATCH 053/176] Change title in menu to fit one line 46 char --- ci/debian-installer/menu.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/debian-installer/menu.cfg b/ci/debian-installer/menu.cfg index 387a46480f4c..a06dd9e0af18 100644 --- a/ci/debian-installer/menu.cfg +++ b/ci/debian-installer/menu.cfg @@ -1,7 +1,7 @@ menu hshift 4 menu width 70 -menu title Packetfence Debian GNU/Linux installer menu (BIOS mode) +menu title PacketFence Debian installer menu (BIOS mode) include stdmenu.cfg include gtk.cfg menu begin advanced @@ -15,7 +15,7 @@ menu begin advanced menu end menu begin dark menu label Accessible ^dark contrast installer menu - menu title Accessible dark contrast option + menu title PacketFence Debian installer (dark contrast) include drkmenu.cfg label mainmenu menu label ^Back.. 
From 86973387780fe7e2f3ae47ee13cbc722801c9006 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:44:02 -0400 Subject: [PATCH 054/176] Remove extra variable, add remove and restore table --- .../00_backup_db_and_restore.yml | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index 4a308198e75d..19b10038f3fa 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -29,14 +29,21 @@ testcases: steps: - type: exec script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-exportable-backup-*.tgz" -newermt "-1 minute"' - vars: - backup_name: - from: result.systemout + +- name: drop_pf_db + steps: + - type: exec + script: mysql -e "DROP DATABASE pf;" + +- name: create_pf_db + steps: + - type: exec + script: mysql -e "CREATE DATABASE pf;" - name: import steps: - type: exec - script: '/usr/local/pf/addons/full-import/import.sh --db -f {{ .backup_name }}' + script: '/usr/local/pf/addons/full-import/import.sh --db -f {{.get_backup_name.result.systemout}}' - name: search_user_in_db steps: From b3bcc40c43ed3900d5199efb05beb3f53e6f74a8 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:45:45 -0400 Subject: [PATCH 055/176] Add handling of devel schema before import --- addons/functions/database.functions | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/addons/functions/database.functions b/addons/functions/database.functions index f7cb4323cd71..9be252af39cb 100644 --- a/addons/functions/database.functions +++ b/addons/functions/database.functions @@ -48,6 +48,7 @@ function import_mysqldump() { # Detect minor version of the restored data restoring_version=`egrep -o 
'[0-9]+\.[0-9]+\.[0-9]+$' usr/local/pf/conf/pf-release | egrep -o '^[0-9]+\.[0-9]+'` echo "Importing bare schema for $restoring_version" + handle_devel_db_schema $restoring_version mysql ${mariadb_args} $db_name < /usr/local/pf/db/pf-schema-$restoring_version.sql check_code $? @@ -254,3 +255,13 @@ function uninstall_current_mariabackup() { yum remove -q -y MariaDB-backup fi } + +function handle_devel_db_schema() { + local restoring_version=$1 + if [ ! -f /usr/local/pf/db/pf-schema-$restoring_version.sql ]; then + main_splitter + echo "The db schema for $restoring_version does not exist in /usr/local/pf/db/. pf-schema-X.Y.sql will be used." + sub_splitter + cp /usr/local/pf/db/pf-schema-X.Y.sql /usr/local/pf/db/pf-schema-$restoring_version.sql + fi +} From 5b27599c5eab2802febc0a8c98b7e79175cb1a3e Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:57:13 -0400 Subject: [PATCH 056/176] Add backupfile name in output, Add more output from cleaning, add directory creation (was missing) --- addons/exportable-backup.sh | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/addons/exportable-backup.sh b/addons/exportable-backup.sh index 85686b3eed37..94175a40a687 100755 --- a/addons/exportable-backup.sh +++ b/addons/exportable-backup.sh @@ -101,10 +101,16 @@ create_backup_directory(){ ### Cleaning ############################################################################# clean_backup(){ + echo "Start backup cleaning" find $BACKUP_DIRECTORY -name "$BACKUP_PF_FILENAME-*.tgz" -mtime +$NB_DAYS_TO_KEEP_BACKUP -delete + echo "Old backup cleaned" find $BACKUP_DIRECTORY -name "$BACKUP_DB_FILENAME-*.sql.gz" -delete + echo "Temp db backup cleaned" find $BACKUP_DIRECTORY -name "$BACKUP_CONF_FILENAME-*.tgz" -delete - find $BACKUP_DIRECTORY -name "$BACKUP_OLD_CONF_FILENAME-*.tgz" -delete + echo "Temp config backup cleaned" + find $BACKUP_DIRECTORY -name 
"$BACKUP_OLD_CONF_FILENAME-*.tgz" -mtime +$NB_DAYS_TO_KEEP_BACKUP -delete + echo "Old config backup cleaned" + echo "Backup cleaning is done" } ############################################################################# @@ -153,20 +159,24 @@ done if [ -z "$BACKUP_FILE" ]; then echo "Default directory $BACKUP_DIRECTORY will be used." - BACKUP_FILE=$BACKUP_DIRECTORY/$BACKUP_CONF_FILENAME-`date +%F_%Hh%M`.tgz + BACKUP_FILE=$BACKUP_DIRECTORY/$BACKUP_PF_FILENAME-`date +%F_%Hh%M`.tgz + echo "The backup file will be $BACKUP_FILE" fi ############################################################################# ### Main ############################################################################# -/bin/bash /usr/local/pf/addons/backup-and-maintenance.sh -if [ ! -f $BACKUP_FILE ]; then - /bin/bash /usr/local/pf/addons/full-import/export.sh $BACKUP_FILE -else - echo -e $BACKUP_DIRECTORY$BACKUP_CONF_FILENAME ", file already created. \n" -fi -clean_backup -if [ $do_replication == 1 ]; then - replicate_backup +create_backup_directory +if check_disk_space; then + /bin/bash /usr/local/pf/addons/backup-and-maintenance.sh + if [ ! -f $BACKUP_FILE ]; then + /bin/bash /usr/local/pf/addons/full-import/export.sh $BACKUP_FILE + else + echo -e $BACKUP_FILE ", file already created. 
\n" + fi + clean_backup + if [ $do_replication == 1 ]; then + replicate_backup + fi + echo "Exportable backup is done" fi -echo "Exportable backup is done" From afcface7f753644e3074825d222a1e4af941c449 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 14:03:23 -0400 Subject: [PATCH 057/176] Remove importing schema before importing db --- addons/functions/database.functions | 6 ------ 1 file changed, 6 deletions(-) diff --git a/addons/functions/database.functions b/addons/functions/database.functions index 9be252af39cb..03bec347291c 100644 --- a/addons/functions/database.functions +++ b/addons/functions/database.functions @@ -45,12 +45,6 @@ function import_mysqldump() { # We reimport the schema so that we have the functions and triggers if the dump doesn't contain the triggers if ! egrep "CREATE.*TRIGGER.*(AFTER|BEFORE)" $dump_file > /dev/null; then echo "Dump file was made without triggers and procedures" - # Detect minor version of the restored data - restoring_version=`egrep -o '[0-9]+\.[0-9]+\.[0-9]+$' usr/local/pf/conf/pf-release | egrep -o '^[0-9]+\.[0-9]+'` - echo "Importing bare schema for $restoring_version" - handle_devel_db_schema $restoring_version - mysql ${mariadb_args} $db_name < /usr/local/pf/db/pf-schema-$restoring_version.sql - check_code $? 
sub_splitter echo "Replacing create statements from the dump and removing drop statements" From afef922367056d36b7628a143ae8e83bd8e37400 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 14:24:40 -0400 Subject: [PATCH 058/176] Remove extra cleaning done by importing schema before --- addons/functions/database.functions | 8 -------- 1 file changed, 8 deletions(-) diff --git a/addons/functions/database.functions b/addons/functions/database.functions index 03bec347291c..04a53e906483 100644 --- a/addons/functions/database.functions +++ b/addons/functions/database.functions @@ -57,14 +57,6 @@ function import_mysqldump() { sed -i 's/DELETE IGNORE FROM `action`;//g' $dump_file sed -i 's/DELETE IGNORE FROM `activation`;//g' $dump_file - # We need to drop the data from all the tables that contain data in our bare schema - # This is because the mysqldump import already contains this data - mysql ${mariadb_args} $db_name -e 'delete ignore from node_category;' - mysql ${mariadb_args} $db_name -e 'delete ignore from password;' - mysql ${mariadb_args} $db_name -e 'delete ignore from person;' - mysql ${mariadb_args} $db_name -e 'delete ignore from pf_version;' - mysql ${mariadb_args} $db_name -e 'delete ignore from radreply;' - mysql ${mariadb_args} $db_name -e 'delete ignore from sms_carrier;' else echo "Dump file includes triggers and procedures" fi From e18da6207e9f0d1b64017e178b37bfae447dae1a Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:40:05 -0400 Subject: [PATCH 059/176] Add restore only db, without upgradeing --- addons/full-import/import.sh | 115 +++++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 46 deletions(-) diff --git a/addons/full-import/import.sh b/addons/full-import/import.sh index c2317cdb72ce..7306cdf34757 100755 --- a/addons/full-import/import.sh +++ b/addons/full-import/import.sh @@ -40,7 +40,7 @@ prepare_import() { ls -l | grep -v 
export.tgz main_splitter - files_dump=`ls packetfence-files-*` + files_dump=`ls packetfence-conf-*` echo "Found files dump '$files_dump'" echo "Extracting files dump" @@ -79,31 +79,36 @@ import_db() { if echo "$db_dump" | grep '\.sql$' >/dev/null; then echo "The database dump uses mysqldump" #TODO /tmp/grants.sql should be included in the export - import_mysqldump grants.sql $db_dump usr/local/pf/conf/pf.conf + import_mysqldump grants.sql $db_dump usr/local/pf/conf/pf.conf $do_db_restore elif echo "$db_dump" | grep '\.xbstream$' >/dev/null; then echo "The database uses mariabackup" - # permit to remove mariabackup if everything goes well - # or to uninstall it if a failure occurs during installation + # permit to remove mariabackup if everything goes well + # or to uninstall it if a failure occurs during installation if install_mariabackup $pf_version_in_export; then - mariabackup_installed=true - else - uninstall_mariabackup $pf_version_in_export - exit 1 - fi + mariabackup_installed=true + else + uninstall_mariabackup $pf_version_in_export + exit 1 + fi import_mariabackup $db_dump else echo "Unable to detect format of the database dump" exit 1 fi - handle_devel_upgrade `egrep -o '[0-9]+\.[0-9]+\.[0-9]+$' /usr/local/pf/conf/pf-release | egrep -o '^[0-9]+\.[0-9]+'` + if [ "$do_db_restore" -eq 0 ] ; then + handle_devel_upgrade `egrep -o '[0-9]+\.[0-9]+\.[0-9]+$' /usr/local/pf/conf/pf-release | egrep -o '^[0-9]+\.[0-9]+'` - #TODO: check the version of the export, we want to support only 10.3.0 and above - #TODO: check if galera is enabled and stop if its the case + #TODO: check the version of the export, we want to support only 10.3.0 and above + #TODO: check if galera is enabled and stop if its the case - main_splitter - db_name=`get_db_name usr/local/pf/conf/pf.conf` - upgrade_database $db_name + main_splitter + db_name=`get_db_name usr/local/pf/conf/pf.conf` + upgrade_database $db_name + else + main_splitter + echo "Only database restoration has been selected. 
No upgrade on database will be done" + fi } import_config() { @@ -132,31 +137,44 @@ import_config() { restore_certificates } + finalize_import() { - main_splitter - echo "Finalizing import" + if [ "$do_db_restore" -eq 0 ] ; then + main_splitter + echo "Finalizing import" - sub_splitter - echo "Applying fixpermissions" - /usr/local/pf/bin/pfcmd fixpermissions + sub_splitter + echo "Applying fixpermissions" + /usr/local/pf/bin/pfcmd fixpermissions - sub_splitter - echo "Restarting packetfence-redis-cache" - systemctl restart packetfence-redis-cache + sub_splitter + echo "Restarting packetfence-redis-cache" + systemctl restart packetfence-redis-cache - sub_splitter - echo "Restarting packetfence-config" - systemctl restart packetfence-config + sub_splitter + echo "Restarting packetfence-config" + systemctl restart packetfence-config - sub_splitter - echo "Reloading configuration" - configreload + sub_splitter + echo "Reloading configuration" + configreload - main_splitter - echo "Completed import of the database and the configuration! Complete any necessary adjustments and restart PacketFence" + main_splitter + echo "Completed import of the database and the configuration! Complete any necessary adjustments and restart PacketFence" - # Done with everything, time to cleanup! - systemctl cat monit > /dev/null 2>&1 && systemctl enable monit + # Done with everything, time to cleanup! + systemctl cat monit > /dev/null 2>&1 && systemctl enable monit + else + main_splitter + echo "Finalizing db restoration" + + sub_splitter + echo "Restarting service packetfence-httpd.admin_dispatcher" + systemctl restart packetfence-httpd.admin_dispatcher + echo "Restarting packetfence-haproxy-admin service" + systemctl start packetfence-haproxy-admin + echo "Completed import of the database! Complete any necessary adjustments and restart PacketFence" + fi popd > /dev/null } @@ -169,9 +187,10 @@ Usage: $0 -f /path/to/export.tgz [OPTION]... 
Options: -f,--file Import a PacketFence export (mandatory) -h,--help Display this help - --db Import only database from PacketFence export - --conf Import only configuration from PacketFence export - --skip-adjust-conf Don't run adjustments on configuration (only use it if you know what you are doing) + --db Import only database from PacketFence export + --db-restore Restore only database from PacketFence export + --conf Import only configuration from PacketFence export + --skip-adjust-conf Don't run adjustments on configuration (only use it if you know what you are doing) EOF } @@ -181,6 +200,7 @@ EOF ############################################################################# do_full_import=1 do_db_import=0 +do_db_restore=0 do_config_import=0 do_adjust_config=1 mariabackup_installed=false @@ -189,7 +209,7 @@ EXPORT_FILE=${EXPORT_FILE:-} # Parse option # TEMP=$(getopt -o f:h --long file:,help,db,conf \ # -n "$0" -- "$@") || (echo "getopt failed." && exit 1) -TEMP=$(getopt -o f:h --long file:,help,db,conf,skip-adjust-conf \ +TEMP=$(getopt -o f:h --long file:,help,db,db-restore,conf,skip-adjust-conf \ -n "$0" -- "$@") || (echo "getopt failed." && exit 1) # Note the quotes around `$TEMP': they are essential! @@ -198,15 +218,18 @@ eval set -- "$TEMP" while true ; do case "$1" in -f|--file) - # first shift is mandatory to get file path - shift - EXPORT_FILE="$1" ; shift + # first shift is mandatory to get file path + shift + EXPORT_FILE="$1" ; shift ;; -h|--help) - help ; exit 0 ; shift + help ; exit 0 ; shift ;; --db) - do_db_import=1 ; do_full_import=0 ; shift + do_db_restore=0 ; do_db_import=1 ; do_full_import=0 ; shift + ;; + --db-restore) + do_db_restore=1 ; do_db_import=1 ; do_full_import=0 ; shift ;; --conf) do_config_import=1 ; do_full_import=0 ; shift @@ -214,11 +237,11 @@ while true ; do --skip-adjust-conf) do_adjust_config=0 ; shift ;; - --) - shift ; break + --) + shift ; break ;; - *) - echo "Wrong usage !" ; help ; exit 1 + *) + echo "Wrong usage !" 
; help ; exit 1 ;; esac done From 59129670a780d8e33be829db9c80f71d056f7ac2 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:42:46 -0400 Subject: [PATCH 060/176] Add comment on help about db restoration --- addons/full-import/import.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/full-import/import.sh b/addons/full-import/import.sh index 7306cdf34757..b015e3679a3f 100755 --- a/addons/full-import/import.sh +++ b/addons/full-import/import.sh @@ -188,7 +188,7 @@ Options: -f,--file Import a PacketFence export (mandatory) -h,--help Display this help --db Import only database from PacketFence export - --db-restore Restore only database from PacketFence export + --db-restore Restore only database from PacketFence export without upgrade process --conf Import only configuration from PacketFence export --skip-adjust-conf Don't run adjustments on configuration (only use it if you know what you are doing) From e6d7b6ee0fab08e041e638e65544b65c822600c7 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:52:39 -0400 Subject: [PATCH 061/176] Removing db is done in import script with database.functions --- .../backup_db_and_restore/00_backup_db_and_restore.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index 19b10038f3fa..a9fd82cee7f6 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -30,16 +30,6 @@ testcases: - type: exec script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-exportable-backup-*.tgz" -newermt "-1 minute"' -- name: drop_pf_db - steps: - - type: exec - script: mysql -e "DROP DATABASE pf;" - -- name: create_pf_db - steps: - - type: exec - script: 
mysql -e "CREATE DATABASE pf;" - - name: import steps: - type: exec From ca329be206013581a41542816265e841ad89c4e3 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Tue, 15 Oct 2024 20:18:58 +0000 Subject: [PATCH 062/176] fix database dumpfile replacement --- addons/functions/database.functions | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/addons/functions/database.functions b/addons/functions/database.functions index f7cb4323cd71..cf2b72a8dd97 100644 --- a/addons/functions/database.functions +++ b/addons/functions/database.functions @@ -52,11 +52,14 @@ function import_mysqldump() { check_code $? sub_splitter - echo "Replacing create statements from the dump and removing drop statements" + echo "Replacing CREATE TABLE and DROP TABLE statements" # This is done so that tables aren't dropped - sed -i "s/^DROP TABLE IF EXISTS /---DROP TABLE IF EXISTS /g" $dump_file + sed -i "s/^DROP TABLE IF EXISTS /-- DROP TABLE IF EXISTS /g" $dump_file sed -i "s/CREATE TABLE /CREATE TABLE IF NOT EXISTS /g" $dump_file + echo "Replacing INSERT statements" + sed -i "s/^INSERT INTO /INSERT IGNORE INTO /g" $dump_file + # Delete this statement if its there because this table isn't in our schema anymore but some old deployments have it sed -i 's/DELETE IGNORE FROM `dhcp_fingerprint`;//g' $dump_file sed -i 's/DELETE IGNORE FROM `action`;//g' $dump_file From a4e19c976c558114ad1609eb33b0fdb622be3a7c Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Tue, 15 Oct 2024 17:11:11 -0400 Subject: [PATCH 063/176] Format the acl name to be compliant with what the Cisco switch expect. 
--- lib/pf/Switch/Cisco/Cisco_IOS_15_5.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/pf/Switch/Cisco/Cisco_IOS_15_5.pm b/lib/pf/Switch/Cisco/Cisco_IOS_15_5.pm index 6529338b2f91..372bba2e7526 100644 --- a/lib/pf/Switch/Cisco/Cisco_IOS_15_5.pm +++ b/lib/pf/Switch/Cisco/Cisco_IOS_15_5.pm @@ -170,7 +170,7 @@ sub returnRadiusAccessAccept { my @acl = split("\n", $access_list); $args->{'acl'} = \@acl; $args->{'acl_num'} = '101'; - push(@av_pairs, "subscriber:service-name=$mac-".$self->setRadiusSession($args)); + push(@av_pairs, "subscriber:service-name=".$args->{'user_role'}."-".$self->setRadiusSession($args)); } else { my $acl_num = 101; while($access_list =~ /([^\n]+)\n?/g){ @@ -213,7 +213,7 @@ sub returnRadiusAdvanced { my ($self, $args, $options) = @_; my $logger = $self->logger; my $status = $RADIUS::RLM_MODULE_OK; - my ($mac, $session_id) = split('-', $args->{'user_name'}); + my ($role, $session_id) = split('-', $args->{'user_name'}); my $radius_reply_ref = (); my @av_pairs; $radius_reply_ref->{'control:Proxy-To-Realm'} = 'LOCAL'; From 60296c8687cadedbf680fe3c5aaf20f6dc060d86 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:35:45 -0400 Subject: [PATCH 064/176] exit only if not enough space --- addons/exportable-backup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/exportable-backup.sh b/addons/exportable-backup.sh index 94175a40a687..73a2b351cc4e 100755 --- a/addons/exportable-backup.sh +++ b/addons/exportable-backup.sh @@ -81,8 +81,8 @@ check_disk_space(){ if (( $BACKUPS_AVAILABLE_SPACE < (( (( $MYSQL_USED_SPACE + $CONF_USED_SPACE )) /2 )) )); then echo "There is not enough space in $BACKUP_DIRECTORY to safely backup exportable. Skipping backup." >&2 echo "There is not enough space in $BACKUP_DIRECTORY to safely backup exportable. Skipping backup." 
> /usr/local/pf/var/backup_pf.status + exit $BACKUPRC fi - exit $BACKUPRC } From f6bdcc2c13d45286402df69c0afae7562208589b Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:56:36 -0400 Subject: [PATCH 065/176] Change --db to --db-restore to fix the test --- .../backup_db_and_restore/00_backup_db_and_restore.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index a9fd82cee7f6..be2b76fdc97e 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -33,7 +33,7 @@ testcases: - name: import steps: - type: exec - script: '/usr/local/pf/addons/full-import/import.sh --db -f {{.get_backup_name.result.systemout}}' + script: '/usr/local/pf/addons/full-import/import.sh --db-restore -f {{.get_backup_name.result.systemout}}' - name: search_user_in_db steps: From 6837f121ab8db63c0677562859e00c30f5b236dd Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:46:04 -0400 Subject: [PATCH 066/176] Fix #8351 --- addons/perl-client/debian/fingerbank.postinst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/perl-client/debian/fingerbank.postinst b/addons/perl-client/debian/fingerbank.postinst index 2c8d61261419..5011ef635c9f 100644 --- a/addons/perl-client/debian/fingerbank.postinst +++ b/addons/perl-client/debian/fingerbank.postinst @@ -30,7 +30,7 @@ case "$1" in make init-db-local make fixpermissions /usr/local/fingerbank/conf/upgrade/* - chown fingerbank.fingerbank /usr/local/fingerbank/conf/fingerbank.conf + chown fingerbank:fingerbank /usr/local/fingerbank/conf/fingerbank.conf ;; abort-upgrade|abort-remove|abort-deconfigure) From a4336d8e279c3d22af5660c81509bb3963a5def6 Mon Sep 17 00:00:00 
2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:50:39 -0400 Subject: [PATCH 067/176] CHange pf.pf to pf:pf --- debian/packetfence-config.postinst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/packetfence-config.postinst b/debian/packetfence-config.postinst index 97a3062b9e4e..3cdac2f5b4e3 100644 --- a/debian/packetfence-config.postinst +++ b/debian/packetfence-config.postinst @@ -24,7 +24,7 @@ case "$1" in if [ ! -f /usr/local/pf/conf/pfconfig.conf ]; then echo "pfconfig.conf doesnt exits" touch /usr/local/pf/conf/pfconfig.conf - chown pf.pf /usr/local/pf/conf/pfconfig.conf + chown pf:pf /usr/local/pf/conf/pfconfig.conf else echo "pfconfig.conf already exists, won't touch it!" fi From 7219f7c4f01004332b336700d4e00d6fe7220cd3 Mon Sep 17 00:00:00 2001 From: stegar123 <53397145+stegar123@users.noreply.github.com> Date: Thu, 17 Oct 2024 15:13:59 -0400 Subject: [PATCH 068/176] Update packetfence-perl_build_image_package.yml --- .github/workflows/packetfence-perl_build_image_package.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/packetfence-perl_build_image_package.yml b/.github/workflows/packetfence-perl_build_image_package.yml index 67bea66c0c0e..9e95d8eb8bc1 100644 --- a/.github/workflows/packetfence-perl_build_image_package.yml +++ b/.github/workflows/packetfence-perl_build_image_package.yml @@ -101,7 +101,7 @@ jobs: PATH_PACKAGE: ${{ inputs._IMAGE_TYPE == 'rhel8' && 'rhel8' || 'debian' }} - name: Upload the package to artifactory ${{inputs._IMAGE_TYPE}} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4.4.3 with: name: ${{ env.ARTIFACTORY_NAME }} path: ${{ inputs._OUTPUT_DIRECTORY }}/${{ env.PATH_PACKAGE }}/packages/${{ env.PACKAGE_NAME }} From 05f0f85c52546a564410013a9bd587128324203e Mon Sep 17 00:00:00 2001 From: stegar123 <53397145+stegar123@users.noreply.github.com> Date: Thu, 17 Oct 2024 15:14:31 -0400 Subject: [PATCH 069/176] Update 
perl-client_build_package.yml --- .github/workflows/perl-client_build_package.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/perl-client_build_package.yml b/.github/workflows/perl-client_build_package.yml index 7a3fb6033e72..f1efe1b6fb61 100644 --- a/.github/workflows/perl-client_build_package.yml +++ b/.github/workflows/perl-client_build_package.yml @@ -70,7 +70,7 @@ jobs: CI_COMMIT_REF_NAME: ${{ inputs._BRANCH_NAME }} - name: Upload the package to artifactory ${{inputs._IMAGE_TYPE}} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4.4.3 with: name: ${{ env.ARTIFACTORY_NAME }} # path: /__w/packetfence/packetfence/addons/perl-client/result/${{ inputs._IMAGE_TYPE == 'rhel8' && 'centos/8' || inputs._IMAGE_TYPE == 'debian11' && 'debian/bookworm'}}/${{ env.PACKAGE_NAME }} From 37aebf22ba42363321399359f553b878bb2599a9 Mon Sep 17 00:00:00 2001 From: Igor Stegarescu Date: Mon, 21 Oct 2024 12:03:20 +0000 Subject: [PATCH 070/176] on psono.py scrypt return as json for "all" option --- addons/packetfence-perl/psono.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/packetfence-perl/psono.py b/addons/packetfence-perl/psono.py index cc41a2319a8d..dce381479580 100644 --- a/addons/packetfence-perl/psono.py +++ b/addons/packetfence-perl/psono.py @@ -48,7 +48,7 @@ def return_data(return_value, decrypted_secret): elif return_value == "password_notes": return decrypted_secret['application_password_notes'] elif return_value == "all": - return decrypted_secret + return json.dumps(decrypted_secret) else: return f"The condition does not correspond" From db0af6eb5e764609fdf596a3f4ca61c23a404fe1 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Mon, 21 Oct 2024 08:21:30 -0400 Subject: [PATCH 071/176] Add missing chown found by @E-ThanG https://github.com/inverse-inc/packetfence/issues/8351#issuecomment-2420895561 --- addons/AD/migrate.pl | 2 +- 
addons/monit/monitoring-scripts/update.sh | 4 ++-- addons/perl-client/Makefile | 4 ++-- addons/pfconfig/pfconfig.init | 2 +- bin/cluster/sync | 2 +- rpm/packetfence.spec | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/addons/AD/migrate.pl b/addons/AD/migrate.pl index 88f44f2413d4..fdbb432b0b45 100755 --- a/addons/AD/migrate.pl +++ b/addons/AD/migrate.pl @@ -86,7 +86,7 @@ BEGIN print "Please re-run the script again or configure the domain directly through the admin UI in 'Configuration->Domain' \n"; } -pf_run("chown pf.pf $domain_config_file"); +pf_run("chown pf:pf $domain_config_file"); =head1 AUTHOR diff --git a/addons/monit/monitoring-scripts/update.sh b/addons/monit/monitoring-scripts/update.sh index 28303e6f5b11..919117a252ce 100755 --- a/addons/monit/monitoring-scripts/update.sh +++ b/addons/monit/monitoring-scripts/update.sh @@ -51,7 +51,7 @@ id -u pf-monitoring || execute_and_check "useradd pf-monitoring -s /bin/bash" execute_and_check "usermod -a -G pf pf-monitoring" execute_and_check "mkdir -p $script_dir" -execute_and_check "chown root.pf-monitoring $script_dir" +execute_and_check "chown root:pf-monitoring $script_dir" execute_and_check "chmod 0750 $script_dir" download_and_check $script_registry_url $script_registry_file @@ -76,7 +76,7 @@ while read u; do echo "Placing $u in $fname" execute_and_check "mv $tmp $script_dir/$fname" "Cannot place file in script directory" execute_and_check "chmod ug+rx $script_dir/$fname" "Cannot set executable bit on script" - execute_and_check "chown pf-monitoring.pf-monitoring $script_dir/$fname" "Cannot set executable bit on script" + execute_and_check "chown pf-monitoring:pf-monitoring $script_dir/$fname" "Cannot set executable bit on script" done <$script_registry_file if [ $ERROR -ne 0 ]; then diff --git a/addons/perl-client/Makefile b/addons/perl-client/Makefile index 0cececd38138..747ebdebda87 100644 --- a/addons/perl-client/Makefile +++ b/addons/perl-client/Makefile @@ -40,13 +40,13 @@ 
FBGROUP=fingerbank init-db-local: @db/upgrade.pl --database db/fingerbank_Local.db; \ - chown fingerbank.fingerbank /usr/local/fingerbank/db/fingerbank_Local.db; \ + chown fingerbank:fingerbank /usr/local/fingerbank/db/fingerbank_Local.db; \ chmod 664 /usr/local/fingerbank/db/fingerbank_Local.db; \ init-db-upstream: @read -p "API key (ENTER if none): " api_key; \ perl -I/usr/local/fingerbank/lib -Mfingerbank::DB -Mfingerbank::Util -Mfingerbank::Log -e "fingerbank::Log::init_logger; fingerbank::DB::update_upstream( (api_key => \"$$api_key\") )"; \ - chown fingerbank.fingerbank /usr/local/fingerbank/db/fingerbank_Upstream.db; \ + chown fingerbank:fingerbank /usr/local/fingerbank/db/fingerbank_Upstream.db; \ chmod 664 /usr/local/fingerbank/db/fingerbank_Upstream.db; \ package-files: diff --git a/addons/pfconfig/pfconfig.init b/addons/pfconfig/pfconfig.init index 6645c90aa2bd..30d102c6e1a3 100755 --- a/addons/pfconfig/pfconfig.init +++ b/addons/pfconfig/pfconfig.init @@ -31,7 +31,7 @@ start() { fi # create directory, fix rights and remove previous socket mkdir -p $control_files_dir - chown -R $User.$User $control_files_dir + chown -R $User:$User $control_files_dir chmod 0775 $control_files_dir rm -f $socket_path daemon --user=$User ${prog} -d diff --git a/bin/cluster/sync b/bin/cluster/sync index f50d65e339a4..c96855232874 100755 --- a/bin/cluster/sync +++ b/bin/cluster/sync @@ -146,7 +146,7 @@ if($master_server){ open(my $fh, '>', $file); print $fh $result; close($fh); - `chown pf.pf $file`; + `chown pf:pf $file`; }; if($@){ get_logger->error("Failed to sync file : $file . $@"); diff --git a/rpm/packetfence.spec b/rpm/packetfence.spec index 9eaa122d985b..e31558d70977 100644 --- a/rpm/packetfence.spec +++ b/rpm/packetfence.spec @@ -715,7 +715,7 @@ fi if [ ! 
-f /usr/local/pf/conf/pf.conf ]; then echo "Touch pf.conf because it doesnt exist" touch /usr/local/pf/conf/pf.conf - chown pf.pf /usr/local/pf/conf/pf.conf + chown pf:pf /usr/local/pf/conf/pf.conf else echo "pf.conf already exists, won't touch it!" fi @@ -723,7 +723,7 @@ fi if [ ! -f /usr/local/pf/conf/pfconfig.conf ]; then echo "Touch pfconfig.conf because it doesnt exist" touch /usr/local/pf/conf/pfconfig.conf - chown pf.pf /usr/local/pf/conf/pfconfig.conf + chown pf:pf /usr/local/pf/conf/pfconfig.conf else echo "pfconfig.conf already exists, won't touch it!" fi From df99f9d318703d5e7f758dd6ff5c87bc10b9649b Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Mon, 21 Oct 2024 09:46:50 -0400 Subject: [PATCH 072/176] Change name of version to replace / by - in name --- ci/packer/zen/build-and-upload.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/packer/zen/build-and-upload.sh b/ci/packer/zen/build-and-upload.sh index 4b1efa2b0e48..7f4ab3f1a593 100755 --- a/ci/packer/zen/build-and-upload.sh +++ b/ci/packer/zen/build-and-upload.sh @@ -8,6 +8,9 @@ then PF_VERSION+=${BASH_REMATCH[1]}; PF_VERSION+=.0; echo "Maintenance Branch detected, try to match tag version with PF version = $PF_VERSION" +elif [[ "$PF_VERSION" =~ ^.*\/.*$ ]]; +then + PF_VERSION="`echo $PF_VERSION | sed -r 's/\//-/g'`" fi VM_NAME=${VM_NAME:-vm} From 32b781d351454a49a18fd47fee37772aaf9bca51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Fri, 25 Oct 2024 15:50:53 +0000 Subject: [PATCH 073/176] Renamed WLC.pm to Cisco_WLC_AireOS.pm and WLC_5500.pm to Cisco_WLC_IOS_XE.pm and removed other WLC modules --- .../Cisco/{WLC.pm => Cisco_WLC_AireOS.pm} | 8 +- .../{WLC_4400.pm => Cisco_WLC_IOS_XE.pm} | 11 +- lib/pf/Switch/Cisco/WLC_2100.pm | 164 ------------------ lib/pf/Switch/Cisco/WLC_2106.pm | 51 ------ lib/pf/Switch/Cisco/WLC_2500.pm | 54 ------ lib/pf/Switch/Cisco/WLC_5500.pm | 54 ------ 6 
files changed, 10 insertions(+), 332 deletions(-) rename lib/pf/Switch/Cisco/{WLC.pm => Cisco_WLC_AireOS.pm} (99%) rename lib/pf/Switch/Cisco/{WLC_4400.pm => Cisco_WLC_IOS_XE.pm} (75%) delete mode 100644 lib/pf/Switch/Cisco/WLC_2100.pm delete mode 100644 lib/pf/Switch/Cisco/WLC_2106.pm delete mode 100644 lib/pf/Switch/Cisco/WLC_2500.pm delete mode 100644 lib/pf/Switch/Cisco/WLC_5500.pm diff --git a/lib/pf/Switch/Cisco/WLC.pm b/lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm similarity index 99% rename from lib/pf/Switch/Cisco/WLC.pm rename to lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm index 0655b393dbd4..ae93a9fc7821 100644 --- a/lib/pf/Switch/Cisco/WLC.pm +++ b/lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm @@ -1,9 +1,9 @@ -package pf::Switch::Cisco::WLC; +package pf::Switch::Cisco::Cisco_WLC_AireOS; =head1 NAME -pf::Switch::Cisco::WLC - Object oriented module to parse SNMP traps and manage -Cisco Wireless Controllers (WLC) and Wireless Service Modules (WiSM) +pf::Switch::Cisco::Cisco_WLC_AireOS - Object oriented module to parse SNMP traps and manage +Cisco WLC (AireOS) and Wireless Service Modules (WiSM) =head1 STATUS @@ -123,7 +123,7 @@ use pf::security_event qw(security_event_count_reevaluate_access); use pf::radius::constants; use pf::locationlog qw(locationlog_get_session); -sub description { 'Cisco Wireless Controller (WLC)' } +sub description { 'Cisco WLC (AireOS)' } =head1 SUBROUTINES diff --git a/lib/pf/Switch/Cisco/WLC_4400.pm b/lib/pf/Switch/Cisco/Cisco_WLC_IOS_XE.pm similarity index 75% rename from lib/pf/Switch/Cisco/WLC_4400.pm rename to lib/pf/Switch/Cisco/Cisco_WLC_IOS_XE.pm index ef2075a95cd5..bbe21c6bb3f2 100644 --- a/lib/pf/Switch/Cisco/WLC_4400.pm +++ b/lib/pf/Switch/Cisco/Cisco_WLC_IOS_XE.pm @@ -1,12 +1,13 @@ -package pf::Switch::Cisco::WLC_4400; +package pf::Switch::Cisco::Cisco_WLC_IOS_XE; =head1 NAME -pf::Switch::Cisco::WLC_4400 - Object oriented module to parse SNMP traps and manage Cisco Wireless Controllers 4400 Series +pf::Switch::Cisco::Cisco_WLC_IOS_XE 
- Object oriented module to parse SNMP traps and +manage Cisco Wireless Controllers Series running on Cisco IOS XE. =head1 STATUS -This module is currently only a placeholder, see L for relevant support items. +This module is currently only a placeholder, see L for relevant support items. =cut @@ -15,9 +16,9 @@ use warnings; use Net::SNMP; -use base ('pf::Switch::Cisco::WLC'); +use base ('pf::Switch::Cisco::Cisco_WLC_AireOS'); -sub description { 'Cisco Wireless (WLC) 4400 Series' } +sub description { 'Cisco WLC (IOS XE)' } =head1 AUTHOR diff --git a/lib/pf/Switch/Cisco/WLC_2100.pm b/lib/pf/Switch/Cisco/WLC_2100.pm deleted file mode 100644 index 2214df572673..000000000000 --- a/lib/pf/Switch/Cisco/WLC_2100.pm +++ /dev/null @@ -1,164 +0,0 @@ -package pf::Switch::Cisco::WLC_2100; - -=head1 NAME - -pf::Switch::Cisco::WLC_2100 - -=head1 SYNOPSIS - -The pf::Switch::Cisco::WLC_2100 module implements an object oriented interface to manage Wireless LAN Controllers. - -=head1 STATUS - -Developed and tested a long time ago on an undocumented IOS. - -With time and product line evolution, this module mostly became a placeholder, -you should see L for other relevant support items and -issues. - -=over - -=item Supports - -=over - -=item Deauthentication with RADIUS Disconnect (RFC3576) - -Requires IOS 5 or later. 
- -=item Deauthentication with CLI (Telnet or SSH) - -=back - -=back - -=cut - -use strict; -use warnings; - -use Net::SNMP; - -use base ('pf::Switch::Cisco::WLC'); - -use pf::constants; -use pf::config qw( - $MAC - $SSID -); -use pf::util qw(format_mac_as_cisco); - -sub description { 'Cisco Wireless (WLC) 2100 Series' } - -=head1 SUBROUTINES - -TODO: This list is incomplete - -=over - -=cut - -# CAPABILITIES -# access technology supported -# special features -use pf::SwitchSupports qw( - WirelessDot1x - WirelessMacAuth - -SaveConfig -); -# inline capabilities -sub inlineCapabilities { return ($MAC,$SSID); } - -=item _deauthenticateMacSNMP - -Deprecated: This is no longer required since IOS 5.x+. New implementation is -in pf::Switch::Cisco::WLC and relies on Disconnect-Message (RFC3576). - -Warning: this method should _never_ be called in a thread. Net::Appliance::Session is not thread -safe: - -L - -Warning: this code doesn't support elevating to privileged mode. See #900 and #1370. - -=cut - -sub _deauthenticateMacSNMP { - my ( $self, $mac ) = @_; - my $logger = $self->logger; - - $mac = format_mac_as_cisco($mac); - if ( !defined($mac) ) { - $logger->error("ERROR: MAC format is incorrect. Aborting deauth..."); - # TODO return 1, really? - return 1; - } - - my $session; - eval { - require Net::Appliance::Session; - $session = Net::Appliance::Session->new( - Host => $self->{_ip}, - Timeout => 5, - Transport => $self->{_cliTransport} - ); - $session->connect( - Name => $self->{_cliUser}, - Password => $self->{_cliPwd} - ); - # Session not already privileged are not supported at this point. See #1370 - #$session->begin_privileged( $self->{_cliEnablePwd} ); - $session->do_privileged_mode(0); - $session->begin_configure(); - }; - - if ($@) { - $logger->error( "ERROR: Can not connect to WLC $self->{'_ip'} using " - . $self->{_cliTransport} ); - return 1; - } - - #if (! 
$session->enable($self->{_cliEnablePwd})) { - # $logger->error("ERROR: Can not 'enable' telnet connection"); - # return 1; - #} - $session->cmd("client deauthenticate $mac"); - $session->close(); - - return 1; -} - -=back - -=head1 AUTHOR - -Inverse inc. - -=head1 COPYRIGHT - -Copyright (C) 2005-2024 Inverse inc. - -=head1 LICENSE - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -USA. - -=cut - -1; - -# vim: set shiftwidth=4: -# vim: set expandtab: -# vim: set backspace=indent,eol,start: diff --git a/lib/pf/Switch/Cisco/WLC_2106.pm b/lib/pf/Switch/Cisco/WLC_2106.pm deleted file mode 100644 index ccc119ff4130..000000000000 --- a/lib/pf/Switch/Cisco/WLC_2106.pm +++ /dev/null @@ -1,51 +0,0 @@ -package pf::Switch::Cisco::WLC_2106; - -=head1 NAME - -pf::Switch::Cisco::WLC_2106 - -=head1 STATUS - -This module is currently only a placeholder, see L for relevant support items. - -=cut - -use strict; -use warnings; - -use Net::SNMP; - -use base ('pf::Switch::Cisco::WLC_2100'); - -=head1 AUTHOR - -Inverse inc. - -=head1 COPYRIGHT - -Copyright (C) 2005-2024 Inverse inc. - -=head1 LICENSE - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. 
- -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -USA. - -=cut - -1; - -# vim: set shiftwidth=4: -# vim: set expandtab: -# vim: set backspace=indent,eol,start: diff --git a/lib/pf/Switch/Cisco/WLC_2500.pm b/lib/pf/Switch/Cisco/WLC_2500.pm deleted file mode 100644 index 05dd140a6975..000000000000 --- a/lib/pf/Switch/Cisco/WLC_2500.pm +++ /dev/null @@ -1,54 +0,0 @@ -package pf::Switch::Cisco::WLC_2500; - -=head1 NAME - -pf::Switch::Cisco::WLC_2500 - Object oriented module to parse SNMP traps and -manage Cisco Wireless Controllers 2500 Series - -=head1 STATUS - -This module is currently only a placeholder, see L for relevant support items. - -=cut - -use strict; -use warnings; - -use Net::SNMP; - -use base ('pf::Switch::Cisco::WLC'); - -sub description { 'Cisco Wireless (WLC) 2500 Series' } - -=head1 AUTHOR - -Inverse inc. - -=head1 COPYRIGHT - -Copyright (C) 2005-2024 Inverse inc. - -=head1 LICENSE - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -USA. - -=cut - -1; - -# vim: set shiftwidth=4: -# vim: set expandtab: -# vim: set backspace=indent,eol,start: diff --git a/lib/pf/Switch/Cisco/WLC_5500.pm b/lib/pf/Switch/Cisco/WLC_5500.pm deleted file mode 100644 index 4befad970a47..000000000000 --- a/lib/pf/Switch/Cisco/WLC_5500.pm +++ /dev/null @@ -1,54 +0,0 @@ -package pf::Switch::Cisco::WLC_5500; - -=head1 NAME - -pf::Switch::Cisco::WLC_5500 - Object oriented module to parse SNMP traps and -manage Cisco Wireless Controllers 5500 Series - -=head1 STATUS - -This module is currently only a placeholder, see L for relevant support items. - -=cut - -use strict; -use warnings; - -use Net::SNMP; - -use base ('pf::Switch::Cisco::WLC'); - -sub description { 'Cisco Wireless (WLC) 5500 Series' } - -=head1 AUTHOR - -Inverse inc. - -=head1 COPYRIGHT - -Copyright (C) 2005-2024 Inverse inc. - -=head1 LICENSE - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -USA. 
- -=cut - -1; - -# vim: set shiftwidth=4: -# vim: set expandtab: -# vim: set backspace=indent,eol,start: From 407bfffc0229ebb16e0a1f42308f304a4b518cf3 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Mon, 28 Oct 2024 13:36:50 -0400 Subject: [PATCH 074/176] Remove reference to port and use only socket since upgrade to mariadb10.11 https://mariadb.com/kb/en/configuring-mariadb-for-remote-client-access/ --- t/db/setup_test_db.pl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/t/db/setup_test_db.pl b/t/db/setup_test_db.pl index 646f5b46c465..8be0f1683455 100755 --- a/t/db/setup_test_db.pl +++ b/t/db/setup_test_db.pl @@ -46,9 +46,9 @@ sub apply_schema { if (!-e $schema) { die "schema '$schema' does not exists or symlink is broken\n"; } - system("mysql -h$config->{host} -P$config->{port} -u$config->{user} -p$config->{pass} $config->{db} < $schema"); + system("mysql -h$config->{host} -u$config->{user} -p$config->{pass} $config->{db} < $schema"); if ($?) 
{ - print STDERR "mysql -h$config->{host} -P$config->{port} -u$config->{user} -p\"$config->{pass}\" $config->{db} < $schema\n"; + print STDERR "mysql -h$config->{host} -u$config->{user} -p\"$config->{pass}\" $config->{db} < $schema\n"; die "Unable to apply schema\n"; } } @@ -74,7 +74,7 @@ sub smoke_tester_db_connections { sub dsn_from_config { my ($config) = @_; - return "dbi:mysql:;host=$config->{host};port=$config->{port};mysql_client_found_rows=0;mysql_socket=/var/lib/mysql/mysql.sock"; + return "dbi:mysql:;host=$config->{host};mysql_client_found_rows=0;mysql_socket=/var/lib/mysql/mysql.sock"; } sub create_db { From d5e5952c9c00a669007edca0f2564c08f1fbae28 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Tue, 29 Oct 2024 10:34:27 -0400 Subject: [PATCH 075/176] Added missing owner attributes in the filter engine --- lib/pf/constants/filters.pm | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/pf/constants/filters.pm b/lib/pf/constants/filters.pm index b5a3511b9356..d6c8b48556c1 100644 --- a/lib/pf/constants/filters.pm +++ b/lib/pf/constants/filters.pm @@ -212,7 +212,10 @@ our @OWNER_FIELDS = qw( owner.portal owner.source owner.nodes + owner.otp owner.password + owner.potd + owner.psk owner.valid_from owner.expiration owner.access_duration From 62b31cb64cbf87d374baff43d0c84e912439bde3 Mon Sep 17 00:00:00 2001 From: stgmsa Date: Tue, 29 Oct 2024 11:28:16 -0400 Subject: [PATCH 076/176] Feature/118 smb multi thread (#8335) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update ascii doc adds migration script. * gunicorn to launch ntlm auth api backend api changes to support multi account * update gunicorn config file * adjust sd_notify watchdog report * remove expire flag for machine account bind * adds new option to global_config * re-organize config file load * refines imports * remove debug messages * fix imports change machine account to async auth. adds administrator docs and suggested settings. 
* updates admin docs * re-org threading lock, adds redis lock to avoid race conditions on machine account test. * remove unnecessary imports * remove unnecessary config file generation code move redis config to config filesˆˆ --- ...ds-domain-multi-machine-account-support.pl | 76 ++++++ bin/pyntlm_auth/app.py | 12 - bin/pyntlm_auth/config_loader.py | 192 ++++++++++++-- bin/pyntlm_auth/entrypoint.py | 101 ++++++++ bin/pyntlm_auth/global_vars.py | 22 +- bin/pyntlm_auth/gunicorn.conf.py | 86 +++++++ bin/pyntlm_auth/handlers.py | 241 +++++++++++++++--- bin/pyntlm_auth/redis_client.py | 26 ++ bin/pyntlm_auth/t_api.py | 66 ----- bin/pyntlm_auth/t_async_job.py | 132 ++++++++++ bin/pyntlm_auth/t_health_checker.py | 11 + bin/pyntlm_auth/t_sdnotify.py | 24 +- bin/pyntlm_auth/t_worker_register.py | 51 ++++ bin/pyntlm_auth/utils.py | 41 ++- containers/ntlm-auth-api/Dockerfile | 4 +- .../authentication_mechanisms.asciidoc | 1 + .../performance_optimizations.asciidoc | 79 ++++++ .../lib/pfappserver/Form/Config/Domain.pm | 8 + .../domains/_components/TheForm.vue | 7 + .../domains/_components/index.js | 1 + .../UnifiedApi/Controller/Config/Domains.pm | 84 +++--- lib/pf/services/manager/ntlm_auth_api.pm | 4 + 22 files changed, 1086 insertions(+), 183 deletions(-) create mode 100644 addons/upgrade/to-14.1-adds-domain-multi-machine-account-support.pl delete mode 100644 bin/pyntlm_auth/app.py create mode 100644 bin/pyntlm_auth/entrypoint.py create mode 100644 bin/pyntlm_auth/gunicorn.conf.py create mode 100644 bin/pyntlm_auth/redis_client.py delete mode 100644 bin/pyntlm_auth/t_api.py create mode 100644 bin/pyntlm_auth/t_async_job.py create mode 100644 bin/pyntlm_auth/t_health_checker.py create mode 100644 bin/pyntlm_auth/t_worker_register.py diff --git a/addons/upgrade/to-14.1-adds-domain-multi-machine-account-support.pl b/addons/upgrade/to-14.1-adds-domain-multi-machine-account-support.pl new file mode 100644 index 000000000000..ee70bec9723c --- /dev/null +++ 
b/addons/upgrade/to-14.1-adds-domain-multi-machine-account-support.pl @@ -0,0 +1,76 @@ +#!/usr/bin/perl + +=head1 NAME + +addons/upgrade/to-14.1-adds-domain-multi-machine-account-support.pl + +=cut + +=head1 DESCRIPTION + +adds a new field to domain.conf to tell PacketFence how many additional machine account are created and available to do NTLM authentication. + +=cut + +use strict; +use warnings; +use lib qw(/usr/local/pf/lib /usr/local/pf/lib_perl/lib/perl5); +use pf::IniFiles; +use pf::file_paths qw($domain_config_file); + +my $ini = pf::IniFiles->new(-file => $domain_config_file, -allowempty => 1); + +unless (defined $ini) { + print("Error loading domain config file. Terminated. Try re-run this script or edit domain settings in admin UI manually. /\n"); + exit; +} + +my $updated = 0; + +for my $section (grep {/^\S+/} $ini->Sections()) { + print("Processing section '$section' in domain.conf: "); + if ($section =~ /^[a-zA-Z0-9\-\._]+ [a-zA-Z0-9]+$/) { + if ($ini->exists($section, 'additional_machine_accounts')) { + print("already up to date. skipped.\n") + } + else { + $ini->newval($section, 'additional_machine_accounts', 0); + $updated = 1; + print("done\n") + } + } +} + +if ($updated == 1) { + $ini->RewriteConfig(); +} + + + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut + diff --git a/bin/pyntlm_auth/app.py b/bin/pyntlm_auth/app.py deleted file mode 100644 index 735cb806083a..000000000000 --- a/bin/pyntlm_auth/app.py +++ /dev/null @@ -1,12 +0,0 @@ -from threading import Thread - -import t_api -import t_sdnotify - -if __name__ == '__main__': - t1 = Thread(target=t_api.api) - t2 = Thread(target=t_sdnotify.sd_notify) - t1.start() - t2.start() - t1.join() - t2.join() diff --git a/bin/pyntlm_auth/config_loader.py b/bin/pyntlm_auth/config_loader.py index ee050b9df9dc..d52523b71560 100644 --- a/bin/pyntlm_auth/config_loader.py +++ b/bin/pyntlm_auth/config_loader.py @@ -2,14 +2,105 @@ import os import socket import sys +import threading import time from configparser import ConfigParser +import psutil +import redis + import config_generator import global_vars +import redis_client import utils +def expand_machine_account_list(): + r = [global_vars.s_computer_account_base] + + m = global_vars.s_computer_account_base.replace("$", "") + for i in range(global_vars.c_additional_machine_accounts): + r.append(f"{m}-{i}$") + + return r + + +def cleanup_machine_account_binding(): + machine_accounts = expand_machine_account_list() + + for m in machine_accounts: + key = f"{redis_client.namespace}:machine-account-bind:{m}" + + try: + res = redis_client.r.get(key) + except UnicodeDecodeError: + print(f"can not decode retrieved value of key: {key}. 
Check the value and remove the key manually.") + continue + except redis.ConnectionError: + print("redis connection error when trying to bind machine account.") + continue + except Exception as e: + print(f"unexpected error when trying to bind machine account: {type(e)}: {str(e)}") + continue + + if res is None: + continue + + if not (isinstance(res, str) and res.isdigit()): + print(f"value of key {key} is not a valid PID. Check the value and remove the key manually.") + continue + + bind_pid = int(res) + + if not psutil.pid_exists(bind_pid): + print(f"PID {bind_pid} is already died. Cleaning up.") + try: + redis_client.r.delete(key) + except Exception as e: + print(f"error occurred when trying to clean up machine account binding: key: {key}, err: {str(e)}") + + continue + + cleanup_flag = False + try: + process = psutil.Process(bind_pid) + process_name = process.name() + if process_name != "gunicorn": + cleanup_flag = True + print(f"process {bind_pid} is not a gunicorn managed process. A clean up will be performed.") + except psutil.NoSuchProcess: + print(f"no such process with PID: {bind_pid}, maybe it died right before we check it. Removing binding.") + cleanup_flag = True + except psutil.AccessDenied: + print(f"unable to access process with PID: {bind_pid}, this shouldn't happen. 
Cleaning up anyway.") + cleanup_flag = True + except Exception as e: + print(f"error occurred when trying to read process info: pid: {bind_pid}, {str(e)}") + continue + + if cleanup_flag: + try: + redis_client.r.delete(key) + except Exception as e: + print(f"error occurred when trying to clean up machine account binding: key: {key}, err: {str(e)}") + + +def bind_machine_account(worker_pid): + machine_accounts = expand_machine_account_list() + for m in machine_accounts: + try: + key = f"{redis_client.namespace}:machine-account-bind:{m}" + res = redis_client.r.set(name=key, value=worker_pid, nx=True) + if res is True: + return m + except redis.ConnectionError: + print("redis connection error when trying to bind machine account.") + except Exception as e: + print(f"unexpected error when trying to bind machine account: {str(e)}") + + return None + + def get_boolean_value(v): false_dict = ('', '0', 'no', 'n', 'false', 'off', 'disabled') @@ -31,21 +122,26 @@ def get_int_value(v): try: ret = int(v) return ret, None - except ValueError as e: + except ValueError: return None, 'Value error, can not convert specified value to int' except Exception as e: - return None, 'General error, can not convert specified value to int' + return None, f'General error, can not convert specified value to int: {str(e)}' def config_load(): - global_vars.c_listen_port = os.getenv("LISTEN") - global_vars.c_domain_identifier = socket.gethostname() + " " + os.getenv("IDENTIFIER") + _LISTEN = os.getenv("LISTEN") + if _LISTEN is None or _LISTEN == "": + print("parameter LISTEN not found in system environment. 
unable to start ntlm-auth-api.") + sys.exit(1) + global_vars.c_listen_port = _LISTEN - if global_vars.c_domain_identifier == "" or global_vars.c_listen_port == "": - print("Unable to start ntlm-auth-api: 'IDENTIFIER' or 'LISTEN' is missing.") - exit(1) + _IDENTIFIER = os.getenv("IDENTIFIER") + if _IDENTIFIER is None or _IDENTIFIER == "": + print("parameter IDENTIFIER not found in system environment. unable to start ntlm-auth-api.") + sys.exit(1) + global_vars.c_domain_identifier = socket.gethostname() + " " + _IDENTIFIER - print(f"ntlm-auth-api@{global_vars.c_domain_identifier} on port {global_vars.c_listen_port}...") + print(f"ntlm-auth-api@{_IDENTIFIER} is starting on port {global_vars.c_listen_port}.") identifier = global_vars.c_domain_identifier @@ -65,23 +161,23 @@ def config_load(): print(f" Error loading config from domain.conf: {e}. Terminated.") sys.exit(1) - conf_db = f"/usr/local/pf/var/conf/ntlm-auth-api.d/db.ini" - cp_db = ConfigParser(interpolation=None) - print(f"Load database config from {conf_db}") + server_name_raw = cp_dm.get(identifier, 'server_name') + + additional_machine_accounts = 0 try: - with open(conf_db, 'r') as file: - cp_db.read_file(file) - if 'DB' not in cp_db: - print(f" Section [DB] not found, ntlm-auth-api starts without NT Key caching capability.") - except FileNotFoundError: - print(f" {conf_db} not found, ntlm-auth-api@{identifier} starts without NT Key caching capability.") - except configparser.Error as e: - print(f" Error loading {conf_db}: {e}, ntlm-auth-api@{identifier} starts without NT Key caching capability.") + additional_machine_accounts = cp_dm.get(identifier, 'additional_machine_accounts') + additional_machine_accounts = int(additional_machine_accounts) + except Exception as e: + print(f" failed loading additional_machine_accounts: {str(e)}. 
using 0 as default.") + + if additional_machine_accounts < 0 or additional_machine_accounts > 10: + additional_machine_accounts = 0 + print(f" invalid additional machine account range, using 0 as default.") - server_name_raw = cp_dm.get(identifier, 'server_name') server_name_or_hostname = server_name_raw - if server_name_raw.strip() == "%h": - server_name_or_hostname = socket.gethostname().split(".")[0] + if "%h" in server_name_or_hostname.strip(): + ph = socket.gethostname().split(".")[0] + server_name_or_hostname = server_name_or_hostname.replace("%h", ph) ad_fqdn = cp_dm.get(identifier, 'ad_fqdn') ad_server = cp_dm.get(identifier, 'ad_server') @@ -104,6 +200,40 @@ def config_load(): ad_reset_account_lockout_count_after = cp_dm.get(identifier, 'ad_reset_account_lockout_counter_after', fallback=0) ad_old_password_allowed_period = cp_dm.get(identifier, 'ad_old_password_allowed_period', fallback=60) + conf_db = f"/usr/local/pf/var/conf/ntlm-auth-api.d/db.ini" + cp_db = ConfigParser(interpolation=None) + print(f"Load database config from {conf_db}") + try: + with open(conf_db, 'r') as file: + cp_db.read_file(file) + except FileNotFoundError: + print(f" {conf_db} not found, ntlm-auth-api@{identifier} terminated.") + sys.exit(1) + except configparser.Error as e: + print(f" Error loading {conf_db}: {e}, ntlm-auth-api@{identifier} terminated.") + sys.exit(1) + + if 'CACHE' not in cp_db: + print(f" section [CACHE] not found, ntlm-auth-api@{identifier} terminated.") + sys.exit(1) + + c_cache_host = cp_db.get('CACHE', 'CACHE_HOST', fallback=None) + c_cache_port = cp_db.get('CACHE', 'CACHE_PORT', fallback=None) + if c_cache_host is None or c_cache_port is None: + print(f" unable to load 'CACHE_HOST', 'CACHE_PORT' from config, ntlm-auth-api@{identifier} terminated.") + sys.exit(1) + + if c_cache_port.isdigit() and 0 < int(c_cache_port) < 65536: + c_cache_port = int(c_cache_port) + else: + print(f" unable to parse CACHE_PORT, value must be a valid port within 1..65535.") + 
sys.exit(1) + + print(f" redis://{c_cache_host}:{c_cache_port}") + + if 'DB' not in cp_db: + print(f" Section [DB] not found, ntlm-auth-api starts without NT Key caching capability.") + c_db_host = cp_db.get('DB', "DB_HOST", fallback=None) c_db_port = cp_db.get('DB', "DB_PORT", fallback=None) c_db_user = cp_db.get('DB', "DB_USER", fallback=None) @@ -251,6 +381,7 @@ def config_load(): global_vars.c_workgroup = workgroup global_vars.c_username = username global_vars.c_password = password + global_vars.c_additional_machine_accounts = additional_machine_accounts global_vars.c_netbios_name = netbios_name global_vars.c_workstation = workstation global_vars.c_server_string = server_string @@ -272,3 +403,20 @@ def config_load(): global_vars.c_db_pass = c_db_pass global_vars.c_db = c_db global_vars.c_db_unix_socket = c_db_unix_socket + + global_vars.c_cache_host = c_cache_host + global_vars.c_cache_port = c_cache_port + + global_vars.s_computer_account_base = username + + +def reload_worker_config(): + global_vars.s_lock = threading.Lock() + computer_account = global_vars.s_bind_account.replace("$", "") + + global_vars.c_username = computer_account.upper() + "$" + global_vars.c_netbios_name = computer_account.upper() + global_vars.c_workstation = computer_account.upper() + global_vars.c_server_string = computer_account + + global_vars.s_password_ro = global_vars.c_password diff --git a/bin/pyntlm_auth/entrypoint.py b/bin/pyntlm_auth/entrypoint.py new file mode 100644 index 000000000000..8881d00729f5 --- /dev/null +++ b/bin/pyntlm_auth/entrypoint.py @@ -0,0 +1,101 @@ +import logging +import os +import time +from threading import Thread + +import pymysql +from flask import Flask, g, request +from flaskext.mysql import MySQL + +import config_loader +import global_vars +import handlers + +import t_health_checker +import t_async_job + +app = Flask(__name__) + +time.sleep(1) +worker_pid = os.getpid() +master_pid = os.getppid() + +config_loader.cleanup_machine_account_binding() + 
+while True: + m = config_loader.bind_machine_account(worker_pid) + if m is not None: + global_vars.s_bind_account = m + break + + global_vars.s_worker.log.warning(f"failed to bind machine account: no available accounts, retrying.") + time.sleep(1) + +config_loader.reload_worker_config() +global_vars.s_worker.log.info(f"successfully registered with machine account '{m}', ready to handle requests.") + +flask_jobs = ( + Thread(target=t_async_job.async_auth, daemon=False, args=(global_vars.s_worker,)), + Thread(target=t_async_job.async_test, daemon=False, args=(global_vars.s_worker,)), + Thread(target=t_health_checker.health_check, daemon=True, args=(global_vars.s_worker,)) +) + +for job in flask_jobs: + job.start() + +werkzeug_logger = logging.getLogger('werkzeug') + + +@app.before_request +def register_logger(): + if request.path.startswith("/ping"): + werkzeug_logger.setLevel(logging.CRITICAL) + else: + werkzeug_logger.setLevel(logging.INFO) + + +for i in range(1): + if not global_vars.c_nt_key_cache_enabled: + break + + c_db_port, err = config_loader.get_int_value(global_vars.c_db_port) + if err is not None: + global_vars.c_nt_key_cache_enabled = False + break + + app.config['MYSQL_DATABASE_HOST'] = global_vars.c_db_host + app.config['MYSQL_DATABASE_PORT'] = int(global_vars.c_db_port) + app.config['MYSQL_DATABASE_USER'] = global_vars.c_db_user + app.config['MYSQL_DATABASE_PASSWORD'] = global_vars.c_db_pass + app.config['MYSQL_DATABASE_DB'] = global_vars.c_db + app.config['MYSQL_DATABASE_CHARSET'] = 'utf8mb4' + app.config['MYSQL_DATABASE_SOCKET'] = global_vars.c_db_unix_socket + + mysql = MySQL(autocommit=True, cursorclass=pymysql.cursors.DictCursor) + mysql.init_app(app) + + + @app.before_request + def before_request(): + try: + g.db = mysql.get_db().cursor() + except Exception as e: + e_code = e.args[0] + e_msg = str(e) + print(f" error while init database: {e_code}, {e_msg}. 
Started without NT Key cache capability.") + + + @app.teardown_request + def teardown_request(exception=None): + if hasattr(g, 'db'): + g.db.close() + +app.route('/ntlm/auth', methods=['POST'])(handlers.ntlm_auth_handler) +app.route('/ntlm/expire', methods=['POST'])(handlers.ntlm_expire_handler) +app.route('/event/report', methods=['POST'])(handlers.event_report_handler) +app.route('/ntlm/connect', methods=['GET'])(handlers.ntlm_connect_handler) +app.route('/ntlm/connect', methods=['POST'])(handlers.ntlm_connect_handler_with_password) +app.route('/ping', methods=['GET'])(handlers.ping_handler) + +if __name__ == '__main__': + app.run() diff --git a/bin/pyntlm_auth/global_vars.py b/bin/pyntlm_auth/global_vars.py index 4d1a6dfbeb52..a959126bad79 100644 --- a/bin/pyntlm_auth/global_vars.py +++ b/bin/pyntlm_auth/global_vars.py @@ -1,5 +1,4 @@ import datetime -import threading _global_dict = {} @@ -27,7 +26,12 @@ def get_value(key, value=None): s_reconnect_id = 0 s_connection_last_active_time = datetime.datetime.now() -s_lock = threading.Lock() +s_lock = None + +s_worker = None # gunicorn worker object. +s_bind_account = None # machine account bind to specific worker +s_computer_account_base = None +s_password_ro = None # machine account password loaded from config file # config for domain.conf - AD c_netbios_name = None @@ -36,6 +40,7 @@ def get_value(key, value=None): c_workgroup = None c_workstation = None c_password = None +c_additional_machine_accounts = None c_domain = None c_username = None c_server_name = None @@ -51,14 +56,17 @@ def get_value(key, value=None): c_db = None c_db_unix_socket = None +# config for domain.conf - redis cache +c_cache_host = None +c_cache_port = None + # config for domain.conf - nt key cache c_nt_key_cache_enabled = None c_nt_key_cache_expire = None -c_ad_account_lockout_threshold = 0 # 0..999. 
Default=0, never locks -c_ad_account_lockout_duration = None # Default not set -c_ad_reset_account_lockout_counter_after = None # Default not set -c_ad_old_password_allowed_period = None # Windows 2003+, Default not set, if not set, 60 +c_ad_account_lockout_threshold = 0 # 0..999. Default=0, never locks +c_ad_account_lockout_duration = None # Default not set +c_ad_reset_account_lockout_counter_after = None # Default not set +c_ad_old_password_allowed_period = None # Windows 2003+, Default not set, if not set, 60 c_max_allowed_password_attempts_per_device = None - diff --git a/bin/pyntlm_auth/gunicorn.conf.py b/bin/pyntlm_auth/gunicorn.conf.py new file mode 100644 index 000000000000..12dc1adbdb2c --- /dev/null +++ b/bin/pyntlm_auth/gunicorn.conf.py @@ -0,0 +1,86 @@ +import os +import signal +import sys +from threading import Thread + +import config_loader +import global_vars +import redis_client +import t_async_job +import t_health_checker +import t_sdnotify +import t_worker_register + +NAME = "NTLM Auth API" + +try: + LISTEN = os.getenv("LISTEN") + bind_port = int(LISTEN) +except ValueError: + print(f"invalid value for environment variable 'LISTEN', {NAME} terminated.") + sys.exit(1) +except Exception as e: + print(f"failed to extract parameter 'LISTEN' from environment variable: {str(e)}. 
{NAME} terminated.") + sys.exit(1) + +config_loader.config_load() +worker_num = global_vars.c_additional_machine_accounts + 1 + +wsgi_app = 'entrypoint:app' + +bind = f"0.0.0.0:{bind_port}" +backlog = 2048 +workers = worker_num +worker_class = 'sync' +timeout = 30 +graceful_timeout = 10 + +accesslog = '-' +errorlog = '-' +loglevel = 'info' +capture_output = False +access_log_format = '%(h)s %(l)s %(u)s %(p)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' + +keepalive = 2 +max_requests = 10000 +max_requests_jitter = 50 + +daemon = False + +limit_request_line = 4094 +limit_request_fields = 100 +limit_request_field_size = 8190 + +reload = False + + +def on_exit(server): + server.log.info(f"master process on-exit clean up started...") + server.log.info(f"master process on-exit clean up done...") + + +def worker_exit(server, worker): + worker.log.info(f"worker process pre-exit clean up started, sending thread stop event...") + + t_async_job.stop_event.set() + t_health_checker.stop_event.set() + t_worker_register.stop_event.set() + t_sdnotify.stop_event.set() + + +def post_fork(server, worker): + worker.log.info(f"post fork hook: worker spawned with PID of {worker.pid} by master {server.pid}") + global_vars.s_worker = worker + + if not redis_client.init_connection(): + worker.log.error("unable to initialize redis connection, worker failed to start") + os.kill(server.pid, signal.SIGTERM) + + background_jobs = ( + Thread(target=t_worker_register.primary_worker_register, daemon=True, args=(worker,)), + Thread(target=t_sdnotify.sd_notify, daemon=True, args=(worker,)), + Thread(target=t_worker_register.gc_expire_redis_locks, daemon=True, args=(worker,)) + ) + + for job in background_jobs: + job.start() diff --git a/bin/pyntlm_auth/handlers.py b/bin/pyntlm_auth/handlers.py index f1e4cd7e794f..c61b2318fec2 100644 --- a/bin/pyntlm_auth/handlers.py +++ b/bin/pyntlm_auth/handlers.py @@ -1,17 +1,21 @@ -from http import HTTPStatus -from flask import Flask, request, g -import ncache 
-import re -import hashlib import binascii -import json -import utils -import ms_event +import hashlib +import re +import time +from http import HTTPStatus + +import redis +from flask import request, g +from samba import ntstatus + +import config_loader +import flags import global_vars +import ms_event +import ncache +import redis_client import rpc -import flags -from samba import param, NTSTATUSError, ntstatus # For NTSTATUS, see: # https://github.com/samba-team/samba/blob/master/libcli/util/ntstatus_err_table.txt @@ -42,48 +46,213 @@ def ping_handler(): def ntlm_connect_handler(): - with global_vars.s_lock: - global_vars.s_reconnect_id = global_vars.s_connection_id + machine_accounts = config_loader.expand_machine_account_list() - global_vars.s_secure_channel_connection, global_vars.s_machine_cred, global_vars.s_connection_id, error_code, error_message = rpc.get_secure_channel_connection() + mapping, code, msg = _build_machine_account_bind_mapping(machine_accounts) + if code != 0: + return msg, code - if error_code == ntstatus.NT_STATUS_ACCESS_DENIED: - return "Test machine account failed. 
Access Denied", HTTPStatus.UNAUTHORIZED - if error_code != 0: - return "Error while establishing secure channel connection: " + error_message, HTTPStatus.INTERNAL_SERVER_ERROR + job_id, code, msg = _submit_machine_account_test_job(machine_accounts) + if code != 0: + return msg, code - return "OK", HTTPStatus.OK + results = _poll_machine_account_test_job_results(job_id, machine_accounts) + return _aggregate_results(results, machine_accounts) -def test_password_handler(): +def ntlm_connect_handler_with_password(): data = request.get_json() if data is None: - return 'No JSON payload found in request', HTTPStatus.BAD_REQUEST + return "Invalid JSON payload: decoding failed\n", HTTPStatus.BAD_REQUEST + if 'password' not in data: - return 'Invalid JSON payload format, missing required key: password', HTTPStatus.UNPROCESSABLE_ENTITY + return "Invalid JSON payload: missing required field 'password'\n", HTTPStatus.UNPROCESSABLE_ENTITY - test_password = data['password'].strip() + nt_hash = "" + password = data['password'].strip() + if password: + if re.search(r'^[a-fA-F0-9]{32}$', password): + nt_hash = password + else: + nt4_digest = hashlib.new('md4', password.encode('utf-16le')).digest() + nt_hash = binascii.hexlify(nt4_digest).decode('utf-8') - if re.search(r'^[a-fA-F0-9]{32}$', test_password): - nt_hash = test_password - else: - nt4_digest = hashlib.new('md4', test_password.encode('utf-16le')).digest() - nt_hash = binascii.hexlify(nt4_digest).decode('utf-8') + machine_account = "" + if ('machine_account' in data) and data["machine_account"].strip(): + machine_account = data["machine_account"].strip() + if "$" not in machine_account: + machine_account = f"{machine_account}$" + + machine_accounts = config_loader.expand_machine_account_list() + if machine_account and (machine_account not in machine_accounts): + msg = f"Machine account '{machine_account}' does not match current machine account name patterns" + return msg, HTTPStatus.NOT_FOUND + + if machine_account: + 
machine_accounts = [machine_account] + + mapping, code, msg = _build_machine_account_bind_mapping(machine_accounts) + if code != 0: + return msg, code + + job_id, code, msg = _submit_machine_account_test_job(machine_accounts, nt_hash) + if code != 0: + return msg, code + + results = _poll_machine_account_test_job_results(job_id, machine_accounts) + return _aggregate_results(results, machine_accounts) + + +def _build_machine_account_bind_mapping(machine_accounts): + mapping = {} + + for m in machine_accounts: + key = f"{redis_client.namespace}:machine-account-bind:{m}" + + try: + res = redis_client.r.get(name=key) + if res is None: + msg = f"error fetching machine account binding. machine account test failed: no binding for account {m}" + code = HTTPStatus.INTERNAL_SERVER_ERROR + return {}, code, msg + else: + mapping[m] = res + + except redis.ConnectionError: + msg = "error fetching machine account binding. machine account test failed: redis connection error." + code = HTTPStatus.INTERNAL_SERVER_ERROR + return {}, code, msg + + except Exception as e: + msg = f"error fetching machine account binding. machine account test failed: {str(e)}." 
+ code = HTTPStatus.INTERNAL_SERVER_ERROR + return {}, code, msg + + return mapping, 0, "" + + +def _submit_machine_account_test_job(machine_accounts, password=""): + job_id = time.time() + + key_lock = f"{redis_client.namespace}:async-test:lock:{job_id}" + try: + redis_client.r.lpush(key_lock, 1) + except redis.ConnectionError: + msg = "error submitting machine account test job: lock init failed due to redis connection error" + code = HTTPStatus.INTERNAL_SERVER_ERROR + return None, code, msg + except Exception as e: + msg = f"error submitting machine account test job: lock init failed due to {str(e)}" + code = HTTPStatus.INTERNAL_SERVER_ERROR + return None, code, msg + + for m in machine_accounts: + key = f"{redis_client.namespace}:async-test:jobs:{m}" + + if password.strip() == "": + value = job_id + else: + value = f"{job_id}:{password.strip()}" + + try: + redis_client.r.lpush(key, value) + + except redis.ConnectionError: + msg = "error fetching machine account binding. machine account test failed: redis connection error." + code = HTTPStatus.INTERNAL_SERVER_ERROR + return None, code, msg + + except Exception as e: + msg = f"error fetching machine account binding. machine account test failed: {str(e)}." 
+ code = HTTPStatus.INTERNAL_SERVER_ERROR + return None, code, msg + + return job_id, 0, "" + + +def _poll_machine_account_test_job_results(job_id, machine_accounts): + exp_time = job_id + 2 + results = {} + + while time.time() < exp_time: + time.sleep(0.3) + + for m in machine_accounts: + if m in results and results[m]["status"] in ("OK", "Failed"): + continue + + key = f"{redis_client.namespace}:async-test:results:{job_id}:{m}" + + try: + res = redis_client.r.get(key) + + if res is None: + continue + + if res == "OK": + results[m] = {"status": "OK", "reason": None} + continue + + if isinstance(res, str) and res != "OK": + results[m] = {"status": "Failed", "reason": res} + continue + + except redis.ConnectionError: + results[m] = {"status": "Exception", "reason": f"redis connection issue when fetching job result"} + except Exception as e: + results[m] = {"status": "Exception", "reason": f"redis error '{str(e)}' when fetching job result"} + return results + + +def _aggregate_results(results, machine_accounts): + timeout = [] + successful = [] + failed = [] + exception = [] + + for m in machine_accounts: + if m not in results: + timeout.append(m) + continue + + if results[m]["status"] == "OK": + successful.append(m) + continue + + if results[m]["status"] == "Failed": + failed.append(m) + continue + + if results[m]["status"] == "Exception": + exception.append(m) + continue + + if (not timeout) and (not failed) and (not exception): + return "OK\n", HTTPStatus.OK - global_vars.c_password = nt_hash + s_successful = "" + if successful: + s_successful = "successful: " + ", ".join(successful) + "\n" - with global_vars.s_lock: - global_vars.s_reconnect_id = global_vars.s_connection_id + s_timeout = "" + if timeout: + s_timeout = "timeout: " + ", ".join(timeout) + "\n" - global_vars.s_secure_channel_connection, global_vars.s_machine_cred, global_vars.s_connection_id, error_code, error_message = rpc.get_secure_channel_connection() + s_failed = "" + if failed: + s_failed = 
"Failed: \n" + \ + "\n".join(f" {i}: {results[i]['status']}: {results[i]['reason']}" for i in failed) + \ + "\n" - if error_code == ntstatus.NT_STATUS_ACCESS_DENIED: - return "Test machine account failed. Access Denied", HTTPStatus.UNAUTHORIZED - if error_code != 0: - return "Error while establishing secure channel connection: " + error_message, HTTPStatus.INTERNAL_SERVER_ERROR + s_exception = "" + if exception: + s_exception = "Exception occurred: \n" + \ + "\n".join(f" {i}: {results[i]['status']}: {results[i]['reason']}" for i in exception) + \ + "\n" - return "OK", HTTPStatus.OK + msg = f"machine account test (partially) failed: \n{s_successful}{s_timeout}{s_failed}{s_exception}" + return f"{msg}\n", HTTPStatus.UNPROCESSABLE_ENTITY def ntlm_auth_handler(): diff --git a/bin/pyntlm_auth/redis_client.py b/bin/pyntlm_auth/redis_client.py new file mode 100644 index 000000000000..bfb8f9259558 --- /dev/null +++ b/bin/pyntlm_auth/redis_client.py @@ -0,0 +1,26 @@ +import redis + +import global_vars + +r = None +namespace = "ntlm-auth" + + +def init_connection(): + global r + r = redis.StrictRedis( + host=global_vars.c_cache_host, + port=global_vars.c_cache_port, + db=0, + decode_responses=True, + socket_timeout=5, + retry_on_timeout=True + ) + + try: + r.ping() + return True + except Exception as e: + print(f"unable to establish redis connection: {str(e)}") + + return False diff --git a/bin/pyntlm_auth/t_api.py b/bin/pyntlm_auth/t_api.py deleted file mode 100644 index e38e756e6e65..000000000000 --- a/bin/pyntlm_auth/t_api.py +++ /dev/null @@ -1,66 +0,0 @@ -import logging -import pymysql -from flask import Flask, g, request -from flaskext.mysql import MySQL - -import config_loader -import global_vars -import handlers - - -def api(): - config_loader.config_load() - - app = Flask(__name__) - - werkzeug_logger = logging.getLogger('werkzeug') - - @app.before_request - def register_logger(): - if request.path.startswith("/ping"): - werkzeug_logger.setLevel(logging.CRITICAL) - 
else: - werkzeug_logger.setLevel(logging.INFO) - - for i in range(1): - if not global_vars.c_nt_key_cache_enabled: - break - - c_db_port, err = config_loader.get_int_value(global_vars.c_db_port) - if err is not None: - global_vars.c_nt_key_cache_enabled = False - break - - app.config['MYSQL_DATABASE_HOST'] = global_vars.c_db_host - app.config['MYSQL_DATABASE_PORT'] = int(global_vars.c_db_port) - app.config['MYSQL_DATABASE_USER'] = global_vars.c_db_user - app.config['MYSQL_DATABASE_PASSWORD'] = global_vars.c_db_pass - app.config['MYSQL_DATABASE_DB'] = global_vars.c_db - app.config['MYSQL_DATABASE_CHARSET'] = 'utf8mb4' - app.config['MYSQL_DATABASE_SOCKET'] = global_vars.c_db_unix_socket - - mysql = MySQL(autocommit=True, cursorclass=pymysql.cursors.DictCursor) - mysql.init_app(app) - - @app.before_request - def before_request(): - try: - g.db = mysql.get_db().cursor() - except Exception as e: - e_code = e.args[0] - e_msg = str(e) - print(f" error while init database: {e_code}, {e_msg}. Started without NT Key cache capability.") - - @app.teardown_request - def teardown_request(exception=None): - if hasattr(g, 'db'): - g.db.close() - - app.route('/ntlm/auth', methods=['POST'])(handlers.ntlm_auth_handler) - app.route('/ntlm/expire', methods=['POST'])(handlers.ntlm_expire_handler) - app.route('/event/report', methods=['POST'])(handlers.event_report_handler) - app.route('/ntlm/connect', methods=['GET'])(handlers.ntlm_connect_handler) - app.route('/ntlm/connect', methods=['POST'])(handlers.test_password_handler) - app.route('/ping', methods=['GET'])(handlers.ping_handler) - - app.run(threaded=True, host='0.0.0.0', port=int(global_vars.c_listen_port)) diff --git a/bin/pyntlm_auth/t_async_job.py b/bin/pyntlm_auth/t_async_job.py new file mode 100644 index 000000000000..f8154275a738 --- /dev/null +++ b/bin/pyntlm_auth/t_async_job.py @@ -0,0 +1,132 @@ +import datetime +import time +from threading import Event + +import redis + +import global_vars +import redis_client +import 
rpc + +stop_event = Event() + + +def async_auth(worker): + key = f"{redis_client.namespace}:async-auth:{worker.pid}" + + while not stop_event.is_set(): + try: + res = redis_client.r.lpop(name=key) + if res is None: + time.sleep(0.5) + else: + worker.log.info(f"Data is: {res}") + + except redis.ConnectionError: + worker.log.warning(f"failed fetching async auth job: key = '{key}': redis connectivity issue.") + time.sleep(1) + except Exception as e: + worker.log.warning(f"failed fetching async auth job: key = '{key}': error = {str(e)}") + time.sleep(1) + worker.log.info("Thread 'async_auth' is done.") + + +def async_test(worker): + bind_account = global_vars.s_bind_account + key = f"{redis_client.namespace}:async-test:jobs:{bind_account}" + + while not stop_event.is_set(): + try: + res = redis_client.r.rpop(name=key) + if res is None: + time.sleep(0.5) + else: + s = res.split(":") + job_time = s[0] + password = "" + if len(s) == 2: + password = s[1] + + try: + job_time_f = float(job_time) + if job_time_f + 2 < time.time(): + job_time_fmt = datetime.datetime.fromtimestamp(job_time_f).strftime("%Y-%m-%d %H:%M:%S.%f") + worker.log.warning(f"deprecated job submitted at {job_time_fmt}, dropped. 
payload: {res}") + else: + worker.log.info(f"deal machine account test for: {bind_account} with password '{password}'") + _test_schannel(job_time, bind_account, password) + except Exception as e: + worker.log.warning(f"can not convert job_id '{job_time}' to float number: {str(e)}, payload: {res}") + + except redis.ConnectionError: + worker.log.warning(f"failed fetching async test job: key = '{key}': redis connectivity issue.") + time.sleep(1) + except Exception as e: + worker.log.warning(f"failed fetching async test job: key = '{key}': error = {str(e)}") + time.sleep(1) + worker.log.info("Thread 'async_test' is done.") + + +def _test_schannel(job_id, machine_account, password=""): + + key_lock = f"{redis_client.namespace}:async-test:lock:{job_id}" + try: + v = redis_client.r.brpop(key_lock, 2) + if v is None: + msg = f"lock '{key_lock}' wait timed out. job '{job_id}' failed." + global_vars.s_worker.log.warning(msg) + return + + except redis.ConnectionError: + msg = f"redis connection error occurred when obtaining lock '{key_lock}', job '{job_id}' failed." + global_vars.s_worker.log.warning(msg) + return + except Exception as e: + msg = f"error occurred when obtaining lock '{key_lock}': {str(e)}, job '{job_id}' failed." + global_vars.s_worker.log.warning(msg) + return + + if not password: + password = global_vars.s_password_ro + + with global_vars.s_lock: + global_vars.s_reconnect_id = global_vars.s_connection_id + global_vars.c_password = password + + ( + global_vars.s_secure_channel_connection, + global_vars.s_machine_cred, + global_vars.s_connection_id, + error_code, error_message + ) = rpc.get_secure_channel_connection() + + with global_vars.s_lock: + global_vars.c_password = global_vars.s_password_ro + + try: + redis_client.r.lpush(key_lock, global_vars.s_worker.pid) + except redis.ConnectionError: + msg = f"redis connection error occurred when releasing lock '{key_lock}', job '{job_id}' failed." 
+ global_vars.s_worker.log.warning(msg) + return + except Exception as e: + msg = f"error occurred when releasing lock '{key_lock}': {str(e)}, job '{job_id}' failed." + global_vars.s_worker.log.warning(msg) + return + + if error_code == 0: + result = "OK" + else: + result = f"error code: {error_code}, error message: {error_message}" + # typically, we'll get an NT_STATUS_ACCESS_DENIED error is password is wrong. + + key = f"{redis_client.namespace}:async-test:results:{job_id}:{machine_account}" + + try: + redis_client.r.set(name=key, value=result, ex=5) + except redis.ConnectionError: + msg = f"redis connection error occurred when writing async test job (id = '{job_id}') results: {result}" + global_vars.s_worker.log.warning(msg) + except Exception as e: + msg = f"error '{str(e)}' occurred when writing async test job (id = '{job_id}') results: {result}" + global_vars.s_worker.log.warning(msg) diff --git a/bin/pyntlm_auth/t_health_checker.py b/bin/pyntlm_auth/t_health_checker.py new file mode 100644 index 000000000000..37e60994ac7c --- /dev/null +++ b/bin/pyntlm_auth/t_health_checker.py @@ -0,0 +1,11 @@ +import time +from threading import Event + +stop_event = Event() + + +# we'll do health check and schannel keep alive here. +def health_check(worker): + while not stop_event.is_set(): + # print(f"health check on {worker.pid}") + time.sleep(30) diff --git a/bin/pyntlm_auth/t_sdnotify.py b/bin/pyntlm_auth/t_sdnotify.py index 03cc366454ce..879fe58fb7bc 100644 --- a/bin/pyntlm_auth/t_sdnotify.py +++ b/bin/pyntlm_auth/t_sdnotify.py @@ -1,14 +1,26 @@ import time +from threading import Event import sdnotify +stop_event = Event() -def sd_notify(): + +def sd_notify(worker): n = sdnotify.SystemdNotifier() n.notify("READY=1") - count = 1 - while True: - # print("Running... 
{}".format(count)) - n.notify("STATUS=Count is {}".format(count)) + + count = 0 + while not stop_event.is_set(): + + if count % 30 == 0: + message = "WATCHDOG=1" + n.notify(message) + + message = "STATUS=Count is {}".format(count) + n.notify(message) + + count = 0 + count += 1 - time.sleep(30) + time.sleep(1) diff --git a/bin/pyntlm_auth/t_worker_register.py b/bin/pyntlm_auth/t_worker_register.py new file mode 100644 index 000000000000..12eaadccb369 --- /dev/null +++ b/bin/pyntlm_auth/t_worker_register.py @@ -0,0 +1,51 @@ +import time +from threading import Event + +import redis + +import redis_client + +stop_event = Event() +is_me = Event() + + +def primary_worker_register(worker): + key = f"{redis_client.namespace}:primary_worker" + while not stop_event.is_set(): + try: + res = redis_client.r.set(name=key, value=worker.pid, nx=True, ex=5, get=True) + if res is None: + worker.log.info(f"primary worker is registered on PID: {worker.pid}.") + is_me.set() + if str(worker.pid) == res: + redis_client.r.expire(name=key, time=10, xx=True, gt=True) + + except redis.ConnectionError: + worker.log.warning("failed registering primary worker: redis connection error.") + except Exception as e: + worker.log.warning(f"failed registering primary worker: {str(e)}") + + time.sleep(2) + + +def gc_expire_redis_locks(worker): + while not stop_event.is_set(): + if is_me.is_set(): + try: + keys_iter = redis_client.r.scan_iter(match=f"{redis_client.namespace}:async-test:lock:*", count=10) + for key in keys_iter: + parts = key.split(":") + if len(parts) == 4: + try: + job_time_f = float(parts[3]) + if time.time() - job_time_f > 60: + redis_client.r.delete(key) + except Exception as e: + msg = f"error '{str(e)}' occurred when trying to remove expired lock key: '{key}'" + worker.log.warning(msg) + except redis.ConnectionError: + worker.log.warning(f"can not scanning expired redis lock keys, redis connection error.") + except Exception as e: + worker.log.warning(f"can not scanning expired 
redis lock keys: {str(e)}.") + + time.sleep(10) diff --git a/bin/pyntlm_auth/utils.py b/bin/pyntlm_auth/utils.py index c14f28c13db8..c98cf14ce950 100644 --- a/bin/pyntlm_auth/utils.py +++ b/bin/pyntlm_auth/utils.py @@ -1,9 +1,14 @@ import datetime +import inspect import re -import constants + import dns.resolver +import psutil import pytz +import constants + + # simplified IPv4 validator. def is_ipv4(address): ipv4_pattern = re.compile(r'^(\d{1,3}\.){3}\d{1,3}$') @@ -97,7 +102,7 @@ def expires(in_second): def now(): - ts= datetime.datetime.now().timestamp() + ts = datetime.datetime.now().timestamp() return int(ts) @@ -110,3 +115,35 @@ def extract_event_timestamp(s): return int(number) else: return 0 + + +def get_process_info(pid): + try: + process = psutil.Process(pid) + process_name = process.name() + process_cmdline = process.cmdline() + process_status = process.status() + process_cpu_percent = process.cpu_percent(interval=1.0) + process_memory_info = process.memory_info() + process_create_time = process.create_time() + + return { + "pid": pid, + "name": process_name, + "cmdline": process_cmdline, + "status": process_status, + "cpu_percent": process_cpu_percent, + "memory_info": process_memory_info, + "create_time": process_create_time, + }, None + except psutil.NoSuchProcess: + return None, psutil.NoSuchProcess + except psutil.AccessDenied: + return None, psutil.AccessDenied + except Exception as e: + return None, e + + +def current_function_name(): + # Get the name of the current function + return inspect.currentframe().f_code.co_name diff --git a/containers/ntlm-auth-api/Dockerfile b/containers/ntlm-auth-api/Dockerfile index 6eec15a8283f..5509152c9bda 100644 --- a/containers/ntlm-auth-api/Dockerfile +++ b/containers/ntlm-auth-api/Dockerfile @@ -7,7 +7,7 @@ WORKDIR /usr/local/pf/ COPY bin bin RUN apt-get -qq update && \ - apt-get -yqq install python3-pip python3-pymysql python3-sdnotify python3-tz python3-dev + apt-get -yqq install python3-pip python3-pymysql 
python3-sdnotify python3-tz python3-dev gunicorn3 python3-psutil RUN VER=`python3 -c 'import sys; val=sys.version_info;print(str(val.major)+"."+str(val.minor))'` ; \ sudo rm -rf /usr/lib/python$VER/EXTERNALLY-MANAGED && \ @@ -15,4 +15,4 @@ RUN VER=`python3 -c 'import sys; val=sys.version_info;print(str(val.major)+"."+s COPY addons/ntlm-auth-api/openssl.cnf /usr/lib/ssl/openssl.cnf -ENTRYPOINT /usr/bin/python3 /usr/local/pf/bin/pyntlm_auth/app.py +ENTRYPOINT /usr/bin/gunicorn -c /usr/local/pf/bin/pyntlm_auth/gunicorn.conf.py diff --git a/docs/installation/authentication_mechanisms.asciidoc b/docs/installation/authentication_mechanisms.asciidoc index 922c8623807f..324088910c41 100644 --- a/docs/installation/authentication_mechanisms.asciidoc +++ b/docs/installation/authentication_mechanisms.asciidoc @@ -45,6 +45,7 @@ Where : * *OU* is the OU in the Active Directory where you want to create your computer account. * *Machine account password* password of server's account in your Active Directory * *Allow on registration* would allow devices in the registration network to communicate with the DC. +* *additional machine accounts* How many *additional* machine accounts will be created to handle NTLM authentication. By default 0. Means only 1 machine account will be created. Maximum is 10, you can only create 10 additional machine accounts. You can always check your domain settings by running `net config workstation` on your domain controller. 
form the output, diff --git a/docs/installation/performance_optimizations.asciidoc b/docs/installation/performance_optimizations.asciidoc index 08b90a3f275d..24551f72724b 100644 --- a/docs/installation/performance_optimizations.asciidoc +++ b/docs/installation/performance_optimizations.asciidoc @@ -18,6 +18,85 @@ endif::[] //== Performance Optimizations + +=== Multi Machine Account Support === + +Starting from PacketFence 14.1, we brought new feature to ntlm-auth-api that allows the backend API to be able to process NTLM +requests in a multi-processes model. +Previously, only one machine account will be created and new NTLM auth request has to wait until the previous one was done. +The single thread model can slow down the performance in some extreme heavy loaded scenarios: e.g., a huge enterprise with +a giant windows AD database and devices. + +Now we are able to create multiple machine accounts and register each machine account with a dedicate process when NTLM auth API +starts. + +To enable this feature, you'll have to set the `additional_machine_accounts` to a non-zero value, and corresponding machine +account will be created based on the previous machine account name. + +The name pattern of additional machine account is: "base_machine_account_name"-N, N will be 0..9 +E.g., the machine account you created before was NODE-PF, and the additional machine accounts is set to 2, +then 2 additional accounts will be created on Windows AD with the names of `NODE-PF-0` and `NODE-PF-1`. + +NOTE: There's a hard limitation of how many characters can we use for a machine account name. On windows system, the +limit is 14, if you want to enable this feature, you'll have to make sure the machine account name has a length less than +12. So PacketFence can create additional accounts with a "-N" suffix. 
+ + +==== How does this work ==== +PacketFence will re-generate the config file load the configuration file, determine how many sub-processes will be needed +to use all those machine accounts, then gunicorn master process will launch the X sub processes to handle the requests. +each of the sub process will take a dedicate machine account to perform the authentication. + +==== Limitations ==== + +1. You can not create more than 10 additional machine accounts for each domain. +2. The machine account name can not exceed 12 chars if you want to enable `additional_machine_accounts`. +3. If you use %h as machine account name, the parsed value of %h also need to be less than 12 to enable additional machine accounts. + +==== Benchmarks and suggested settings ==== +We did a benchmark on this feature, here's some results for reference: +All the tests are done on VMs by ESXi 7.0 hosted on a SuperMicro server with 1 CPU of Intel D2123-IT (4C8T @2.2G), 128 G DDR4 RDIMM. + +we are testing this using ab (apache benchmark) to directly test against NTLM auth API with: + +1. 2 additional machine accounts is added (total 3) +2. The PacketFence is hosted on a 4 vCPU 32 Gig VM on ESXi. +3. The Windows AD is hosted on a 2 vCPU 8 Gig VM on ESXi in the same local network with PacketFence. 
+ +``` +ab -n 20000 -c 1 -p ~/eapol_test/payload.admin-akam.json -T 'application/json' http://127.0.0.1:5002/ntlm/auth + +Requests per second: 448.22 [#/sec] (mean) +Time per request: 2.231 [ms] (mean) +Time per request: 2.231 [ms] (mean, across all concurrent requests) + + +ab -n 20000 -c 2 -p ~/eapol_test/payload.admin-akam.json -T 'application/json' http://127.0.0.1:5002/ntlm/auth + +Requests per second: 721.03 [#/sec] (mean) +Time per request: 2.774 [ms] (mean) +Time per request: 1.387 [ms] (mean, across all concurrent requests) + + +ab -n 20000 -c 3 -p ~/eapol_test/payload.admin-akam.json -T 'application/json' http://127.0.0.1:5002/ntlm/auth + +Requests per second: 932.21 [#/sec] (mean) +Time per request: 3.218 [ms] (mean) +Time per request: 1.073 [ms] (mean, across all concurrent requests) + +``` + +Based on the test results, with a single machine account created on Windows AD, the backend API is possible to handle around +400 requests/s, with 3 machine accounts, the capacity will raise up to around 1000 req/s, which is a quite busy and +heavy-load case. + +We suggest creating no more than 5 additional machine accounts (6 total) to maximize the performance as well as keeping +the Windows Event log clean and easy for debugging. + +For low worker loads cases, 1 additional machine account is recommended - just to avoid jitter when the master process terminates +the old process after 10k requests and re-spawn a new one. + + === NT Key Caching === NOTE: This section assumes that you already have an Active Directory domain configuration both in _Configuration -> Policies and Access Control -> Domains -> Active Directory Domains_ and _Configuration -> Policies and Access Control -> Authentication Sources_. If you don't, you need to first configure those. Refer to the appropriate sections of this guide for details on how to configure these two components. 
diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Domain.pm b/html/pfappserver/lib/pfappserver/Form/Config/Domain.pm index 653d8a30f48c..7071548e6754 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Domain.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Domain.pm @@ -258,6 +258,14 @@ has_field 'machine_account_password' => tags => { after_element => \&help, help => 'Password of the machine account to be added to Active Directory.' }, ); +has_field 'additional_machine_accounts' => + ( + type => 'PosInteger', + label => 'Additional machine account for NTLM authentication', + default => 0, + tags => { after_element => \&help, + help => 'How many additional machine accounts should be created and used to parallel NTLM authentication' }, + ); has_field 'password_is_nt_hash' => ( type => 'Text', diff --git a/html/pfappserver/root/src/views/Configuration/domains/_components/TheForm.vue b/html/pfappserver/root/src/views/Configuration/domains/_components/TheForm.vue index 2b7e0284ee32..ed919180da5c 100644 --- a/html/pfappserver/root/src/views/Configuration/domains/_components/TheForm.vue +++ b/html/pfappserver/root/src/views/Configuration/domains/_components/TheForm.vue @@ -28,6 +28,11 @@ :disabled="!isNew && !isClone" /> + + {workgroup}; my $real_computer_name = $item->{server_name}; my $ou = $item->{ou}; + my $additional_machine_accounts = $item->{additional_machine_accounts}; if ($computer_name eq "%h") { $real_computer_name = hostname(); @@ -191,24 +192,35 @@ sub create { } if (!is_nt_hash_pattern($computer_password)) { - my ($add_status, $add_result) = pf::domain::add_computer(" ", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); - if ($add_status == $FALSE) { - if ($add_result =~ /already exists(.+)use \-no\-add/) { - ($add_status, $add_result) = pf::domain::add_computer("-delete", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, 
$workgroup, $ou, $bind_dn, $bind_pass); - if ($add_status == $FALSE) { - $self->render_error(422, "Unable to add machine account: removing existing machine account failed with following error: $add_result"); - return 0; + my @real_computer_names =($real_computer_name); + if ($additional_machine_accounts +0 > 0) { + for my $i (0..$additional_machine_accounts) { + push(@real_computer_names, "$real_computer_name-$i"); + } + } + for (my $i = 0; $i < @real_computer_names[]; $i++) { + $real_computer_name = $real_computer_names[$i]; + + my ($add_status, $add_result) = pf::domain::add_computer(" ", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); + if ($add_status == $FALSE) { + if ($add_result =~ /already exists(.+)use \-no\-add/) { + ($add_status, $add_result) = pf::domain::add_computer("-delete", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); + if ($add_status == $FALSE) { + $self->render_error(422, "Unable to add machine account: removing existing machine account failed with following error: $add_result"); + return 0; + } + ($add_status, $add_result) = pf::domain::add_computer(" ", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); + if ($add_status == $FALSE) { + $self->render_error(422, "Unable to add machine account: recreating machine account with following error: $add_result"); + return 0; + } } - ($add_status, $add_result) = pf::domain::add_computer(" ", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); - if ($add_status == $FALSE) { - $self->render_error(422, "Unable to add machine account: recreating machine account with following error: $add_result"); + else { + $self->render_error(422, "Unable to add machine account with following error: $add_result"); return 0; } } 
- else { - $self->render_error(422, "Unable to add machine account with following error: $add_result"); - return 0; - } + } my $encoded_password = encode("utf-16le", $computer_password); my $hash = md4_hex($encoded_password); @@ -261,6 +273,7 @@ sub update { my $workgroup = $old_item->{workgroup}; my $real_computer_name = $old_item->{server_name}; my $ou = $new_item->{ou}; + my $additional_machine_accounts = $new_item->{additional_machine_accounts}; if ($computer_name eq "%h") { $real_computer_name = hostname(); @@ -296,28 +309,39 @@ sub update { return $self->render_error(422, "Unable to determine AD server's IP address\n") } - if (!is_nt_hash_pattern($new_data->{machine_account_password}) && ($new_data->{machine_account_password} ne $old_item->{machine_account_password})) { - my ($add_status, $add_result) = pf::domain::add_computer("-delete", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); - if ($add_status == $FALSE) { - unless ($add_result =~ /Account (.+) not found in/) { - $self->render_error(422, "Unable to update - remove existing machine account with following error: $add_result"); + my @real_computer_names = ($real_computer_name); + + if ($additional_machine_accounts +0 > 0) { + for my $i (0..$additional_machine_accounts) { + push(@real_computer_names, "$real_computer_name-$i"); + } + } + for (my $i = 0; $i < @real_computer_names[]; $i++) { + $real_computer_name = $real_computer_names[$i]; + if (!is_nt_hash_pattern($new_data->{machine_account_password}) && ($new_data->{machine_account_password} ne $old_item->{machine_account_password})) { + my ($add_status, $add_result) = pf::domain::add_computer("-delete", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); + if ($add_status == $FALSE) { + unless ($add_result =~ /Account (.+) not found in/) { + $self->render_error(422, "Unable to update - remove existing 
machine account with following error: $add_result"); + return 0; + } + } + + ($add_status, $add_result) = pf::domain::add_computer(" ", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); + if ($add_status == $FALSE) { + $self->render_error(422, "Unable to add machine account with following error: $add_result"); return 0; } - } - ($add_status, $add_result) = pf::domain::add_computer(" ", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); - if ($add_status == $FALSE) { - $self->render_error(422, "Unable to add machine account with following error: $add_result"); - return 0; + $new_data->{machine_account_password} = md4_hex(encode("utf-16le", $new_data->{machine_account_password})); + $new_data->{ou} = $new_item->{ou} + } + else { + $new_data->{ou} = $old_item->{ou} } - - $new_data->{machine_account_password} = md4_hex(encode("utf-16le", $new_data->{machine_account_password})); - $new_data->{ou} = $new_item->{ou} - } - else { - $new_data->{ou} = $old_item->{ou} } + $new_data->{server_name} = $computer_name; delete $new_data->{id}; delete $new_data->{bind_dn}; diff --git a/lib/pf/services/manager/ntlm_auth_api.pm b/lib/pf/services/manager/ntlm_auth_api.pm index e782b2246095..f4bfd9f624b1 100644 --- a/lib/pf/services/manager/ntlm_auth_api.pm +++ b/lib/pf/services/manager/ntlm_auth_api.pm @@ -63,6 +63,10 @@ sub generateConfig { pf_run("sudo echo 'DB=$db' >> $generated_conf_dir/" . $self->name . '.d/' . "db.ini"); pf_run("sudo echo 'DB_UNIX_SOCKET=$db_unix_socket' >> $generated_conf_dir/" . $self->name . '.d/' . "db.ini"); + pf_run("sudo echo '[CACHE]' >> $generated_conf_dir/" . $self->name . '.d/' . "db.ini"); + pf_run("sudo echo 'CACHE_HOST=containers-gateway.internal' >> $generated_conf_dir/" . $self->name . '.d/' . "db.ini"); + pf_run("sudo echo 'CACHE_PORT=6379' >> $generated_conf_dir/" . $self->name . '.d/' . 
"db.ini"); + my $host_id = hostname(); for my $identifier (keys(%ConfigDomain)) { my %conf = %{$ConfigDomain{$identifier}}; From 7f11b50cd8f85ff2a23d50cdcdd987f76cda2407 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Tue, 29 Oct 2024 15:30:28 +0000 Subject: [PATCH 077/176] update NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 4d79ece176c4..b89d97123774 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -28,6 +28,7 @@ For a list of compatibility related changes see the < Date: Tue, 29 Oct 2024 10:28:44 -0700 Subject: [PATCH 078/176] update packetfence.spec for #8335 --- rpm/packetfence.spec | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/rpm/packetfence.spec b/rpm/packetfence.spec index 9eaa122d985b..5198b5ac08f1 100644 --- a/rpm/packetfence.spec +++ b/rpm/packetfence.spec @@ -935,18 +935,22 @@ fi %attr(0755, pf, pf) /usr/local/pf/bin/cluster/maintenance %attr(0755, pf, pf) /usr/local/pf/bin/cluster/node %dir /usr/local/pf/bin/pyntlm_auth -%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/app.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/config_generator.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/config_loader.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/constants.py +%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/entrypoint.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/flags.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/global_vars.py +%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/gunicorn.conf.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/handlers.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/ms_event.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/ncache.py +%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/redis_client.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/rpc.py -%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_api.py +%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_async_job.py 
+%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_health_checker.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_sdnotify.py +%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_worker_registry.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/utils.py %attr(0755, pf, pf) /usr/local/pf/sbin/galera-autofix %attr(0755, pf, pf) /usr/local/pf/sbin/mysql-probe From 70573d53c7399d97448e49c872e5a82c3cc47ce5 Mon Sep 17 00:00:00 2001 From: Darren Satkunas Date: Tue, 29 Oct 2024 12:47:50 -0700 Subject: [PATCH 079/176] fix typo #8335 --- rpm/packetfence.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpm/packetfence.spec b/rpm/packetfence.spec index 5198b5ac08f1..49835ead5906 100644 --- a/rpm/packetfence.spec +++ b/rpm/packetfence.spec @@ -950,7 +950,7 @@ fi %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_async_job.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_health_checker.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_sdnotify.py -%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_worker_registry.py +%attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/t_worker_register.py %attr(0755, pf, pf) /usr/local/pf/bin/pyntlm_auth/utils.py %attr(0755, pf, pf) /usr/local/pf/sbin/galera-autofix %attr(0755, pf, pf) /usr/local/pf/sbin/mysql-probe From e67c195dae86c38f5904c9a8ffe6d36acd9fde4a Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 30 Oct 2024 08:56:33 -0400 Subject: [PATCH 080/176] [Venom] Backup and Restore, increase time to search find and can not return empty --- .../backup_db_and_restore/00_backup_db_and_restore.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index be2b76fdc97e..5bd53eb5f7b9 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ 
b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -28,7 +28,9 @@ testcases: - name: get_backup_name steps: - type: exec - script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-exportable-backup-*.tgz" -newermt "-1 minute"' + script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-exportable-backup-*.tgz" -newermt "-10 minute"' | head -n 1 + assertion: + - result.systemout ShouldNotBeEmpty - name: import steps: From 63a1a0245359236c31860798c42a84eadb1380a4 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:15:21 -0400 Subject: [PATCH 081/176] [Venom] Backup and Restore, fix json and assertions --- .../backup_db_and_restore/00_backup_db_and_restore.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index 5bd53eb5f7b9..9ed1d00e12e7 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -28,8 +28,8 @@ testcases: - name: get_backup_name steps: - type: exec - script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-exportable-backup-*.tgz" -newermt "-10 minute"' | head -n 1 - assertion: + script: 'find {{.backup_db_and_restore.backup_dir}} -name "packetfence-exportable-backup-*.tgz" -newermt "-10 minute" | head -n 1' + assertions: - result.systemout ShouldNotBeEmpty - name: import From 8ce8d0b1e9300a48a6cbb536e614a2d5100fe4c3 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 30 Oct 2024 12:45:20 -0400 Subject: [PATCH 082/176] removed password from the owner attributes (filter engine) --- lib/pf/constants/filters.pm | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/pf/constants/filters.pm b/lib/pf/constants/filters.pm index d6c8b48556c1..754d50b391df 100644 --- 
a/lib/pf/constants/filters.pm +++ b/lib/pf/constants/filters.pm @@ -213,7 +213,6 @@ our @OWNER_FIELDS = qw( owner.source owner.nodes owner.otp - owner.password owner.potd owner.psk owner.valid_from From 61f733bea355bfc89a666c1bc48a81ecf6329bb9 Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:02:27 -0400 Subject: [PATCH 083/176] [Venom] Backup and restore: Remove public user --- .../backup_db_and_restore/00_backup_db_and_restore.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index 9ed1d00e12e7..c5d23eccdf1e 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -20,6 +20,11 @@ testcases: assertions: - result.statuscode ShouldEqual 201 +- name: remove PUBLIC user from mysql.user + steps: + - type: exec + script: "mysql -e'DELETE FROM mysql.user WHERE User=\"PUBLIC\"';" + - name: create exportable backup steps: - type: exec From a0e51cd82c2b93c367d44403ab68adc0f945103a Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:13:00 -0400 Subject: [PATCH 084/176] [Venom] Backup and restore: Remove public user the right way --- .../backup_db_and_restore/00_backup_db_and_restore.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml index c5d23eccdf1e..2f8be122eec4 100644 --- a/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml +++ b/t/venom/test_suites/backup_db_and_restore/00_backup_db_and_restore.yml @@ -23,7 +23,7 @@ testcases: - name: remove PUBLIC user from mysql.user steps: - type: exec - script: "mysql -e'DELETE FROM 
mysql.user WHERE User=\"PUBLIC\"';" + script: "mysql -e'DROP USER IF EXISTS PUBLIC;'" - name: create exportable backup steps: From 74cc5f6cedd62d1f247e68908969a717bfa01f11 Mon Sep 17 00:00:00 2001 From: Zhihao Ma Date: Thu, 31 Oct 2024 11:42:21 -0400 Subject: [PATCH 085/176] change lp load to lp load default to avoid file creation. change srv record looking up to native samba finddc implementation --- bin/pyntlm_auth/config_loader.py | 1 + bin/pyntlm_auth/global_vars.py | 1 + bin/pyntlm_auth/rpc.py | 71 +++++++++++++++++++++++--------- 3 files changed, 54 insertions(+), 19 deletions(-) diff --git a/bin/pyntlm_auth/config_loader.py b/bin/pyntlm_auth/config_loader.py index d52523b71560..f3a74af690f8 100644 --- a/bin/pyntlm_auth/config_loader.py +++ b/bin/pyntlm_auth/config_loader.py @@ -377,6 +377,7 @@ def config_load(): print(f" max_allowed_password_attempts_per_device : {max_allowed_attempts_per_device}") global_vars.c_server_name = ad_fqdn + global_vars.c_ad_server = ad_server global_vars.c_realm = realm global_vars.c_workgroup = workgroup global_vars.c_username = username diff --git a/bin/pyntlm_auth/global_vars.py b/bin/pyntlm_auth/global_vars.py index a959126bad79..0885bf88bc42 100644 --- a/bin/pyntlm_auth/global_vars.py +++ b/bin/pyntlm_auth/global_vars.py @@ -44,6 +44,7 @@ def get_value(key, value=None): c_domain = None c_username = None c_server_name = None +c_ad_server = None c_listen_port = None c_domain_identifier = None c_dns_servers = None diff --git a/bin/pyntlm_auth/rpc.py b/bin/pyntlm_auth/rpc.py index 425e65a6cf67..ef5f8ed8c1cd 100644 --- a/bin/pyntlm_auth/rpc.py +++ b/bin/pyntlm_auth/rpc.py @@ -1,14 +1,51 @@ import global_vars -import config_generator from samba import param, NTSTATUSError, ntstatus from samba.credentials import Credentials, DONT_USE_KERBEROS from samba.dcerpc.misc import SEC_CHAN_WKSTA -from samba.dcerpc import netlogon +from samba.dcerpc import netlogon, nbt import utils import datetime from samba.dcerpc.netlogon import 
(netr_Authenticator, MSV1_0_ALLOW_WORKSTATION_TRUST_ACCOUNT, MSV1_0_ALLOW_MSVCHAPV2) import binascii -import random +from samba.net import Net + + +def find_dc(lp): + error_code = -1 + error_message = "unknown error" + + if global_vars.c_dns_servers.strip() != "": + try: + net = Net(Credentials(), lp) + dc = net.finddc(domain=lp.get('realm'), flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS) + return 0, "", dc.pdc_dns_name + except NTSTATUSError as e: + error_code = e.args[0] + error_message = e.args[1] + except Exception as e: + try: + error_code = e.args[0] + except Exception: + pass + error_message = str(e) + + if global_vars.c_server_name.strip() != "" and global_vars.c_ad_server.strip() != "": + try: + net = Net(Credentials(), lp) + dc = net.finddc(address=global_vars.c_ad_server, flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS) + return 0, "", dc.pdc_dns_name + except NTSTATUSError as e: + error_code = e.args[0] + error_message = e.args[1] + except Exception as e: + try: + error_code = e.args[0] + except Exception: + pass + error_message = str(e) + + return error_code, error_message, None + def init_secure_connection(): netbios_name = global_vars.c_netbios_name @@ -19,26 +56,23 @@ def init_secure_connection(): password = global_vars.c_password domain = global_vars.c_domain username = global_vars.c_username - server_name = global_vars.c_server_name # FQDN of Domain Controller - - domain_controller_records = utils.find_ldap_servers(global_vars.c_realm, global_vars.c_dns_servers) - if len(domain_controller_records) > 0: - idx = random.randint(0, len(domain_controller_records) - 1) - record = domain_controller_records[idx] - server_name = record.get('target') lp = param.LoadParm() - try: - config_generator.generate_empty_conf() - lp.load("/usr/local/pf/var/conf/default.conf") - except KeyError: - raise KeyError("SMB_CONF_PATH not set") + lp.load_default() lp.set('netbios name', netbios_name) lp.set('realm', realm) lp.set('server string', server_string) 
lp.set('workgroup', workgroup) + error_code, error_message, pdc_dns_name = find_dc(lp) + if error_code != 0: + return global_vars.s_secure_channel_connection, global_vars.s_machine_cred, error_code, error_message + else: + global_vars.c_server_name = pdc_dns_name + + server_name = global_vars.c_server_name # FQDN of Domain Controller + global_vars.s_machine_cred = Credentials() global_vars.s_machine_cred.guess(lp) @@ -98,8 +132,6 @@ def transitive_login(account_username, challenge, nt_response, domain=None): if domain is None: domain = global_vars.c_domain - server_name = global_vars.c_server_name - workstation = global_vars.c_workstation global_vars.s_secure_channel_connection, global_vars.s_machine_cred, global_vars.s_connection_id, error_code, error_message = get_secure_channel_connection() if error_code != 0: return f"Error while establishing secure channel connection: {error_message}", error_code, None @@ -134,11 +166,12 @@ def transitive_login(account_username, challenge, nt_response, domain=None): logon.identity_info = netlogon.netr_IdentityInfo() logon.identity_info.domain_name.string = domain logon.identity_info.account_name.string = account_username - logon.identity_info.workstation.string = workstation + logon.identity_info.workstation.string = global_vars.c_workstation logon.identity_info.parameter_control = MSV1_0_ALLOW_WORKSTATION_TRUST_ACCOUNT | MSV1_0_ALLOW_MSVCHAPV2 try: - result = global_vars.s_secure_channel_connection.netr_LogonSamLogonWithFlags(server_name, workstation, + result = global_vars.s_secure_channel_connection.netr_LogonSamLogonWithFlags(global_vars.c_server_name, + global_vars.c_workstation, current, subsequent, logon_level, logon, validation_level, From b895e54149102db64c121ffbb4eb16c4ee54ccc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Mon, 4 Nov 2024 07:32:12 +0000 Subject: [PATCH 086/176] updated the web/constants.pm file --- lib/pf/web/constants.pm | 2 
++ 1 file changed, 2 insertions(+) diff --git a/lib/pf/web/constants.pm b/lib/pf/web/constants.pm index c9b66f388005..f9eca2c11079 100644 --- a/lib/pf/web/constants.pm +++ b/lib/pf/web/constants.pm @@ -110,6 +110,8 @@ Readonly::Scalar our $EXT_URL_ARUBA_SWITCH => '^/Aruba::2930M'; Readonly::Scalar our $EXT_URL_CISCO_CATALYST_2960 => '^/Cisco::Catalyst_2960'; Readonly::Scalar our $EXT_URL_CISCO_CISCO_IOS_15_0 => '^/Cisco::Cisco_IOS_15_0'; Readonly::Scalar our $EXT_URL_CISCO_WLC => '^/Cisco::WLC'; +Readonly::Scalar our $EXT_URL_CISCO_WLC_AIREOS => '^/Cisco::Cisco_WLC_AireOS'; +Readonly::Scalar our $EXT_URL_CISCO_WLC_IOS_XE => '^/Cisco::Cisco_WLC_IOS_XE'; Readonly::Scalar our $EXT_URL_CISCO_ASA => '^/Cisco::ASA'; Readonly::Scalar our $EXT_URL_COOVACHILLI => '^/CoovaChilli'; Readonly::Scalar our $EXT_URL_MERAKI => '^/Meraki::MR'; From e3ac42d7a2631ad95949fd7c5013263fbad62d60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Mon, 4 Nov 2024 07:59:56 +0000 Subject: [PATCH 087/176] created and updated the to-14.1-convert-ciscoWLC-switch-types.pl script --- .../to-14.1-convert-ciscoWLC-switch-types.pl | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100755 addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl diff --git a/addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl b/addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl new file mode 100755 index 000000000000..8af06bec4f81 --- /dev/null +++ b/addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl @@ -0,0 +1,87 @@ +#!/usr/bin/perl + +=head1 NAME + +to-13.1-convert-switch-types.pl + +=head1 DESCRIPTION + +Convert some switch types and use Switch OS versions + +=cut + +use strict; +use warnings; +use lib qw(/usr/local/pf/lib); +use lib qw(/usr/local/pf/lib_perl/lib/perl5); +use pf::util qw(run_as_pf); +use pf::IniFiles; +use pf::file_paths qw( + $switches_config_file +); +use File::Copy; + +run_as_pf(); + +my $file = 
$switches_config_file; + +if (@ARGV) { + $file = $ARGV[0]; +} + +our %types = ( + 'Cisco::WLC' => 'Cisco::Cisco_WLC_AireOS', + 'Cisco::WLC_2100' => 'Cisco::Cisco_WLC_AireOS', + 'Cisco::WLC_2106' => 'Cisco::Cisco_WLC_AireOS', + 'Cisco::WLC_2500' => 'Cisco::Cisco_WLC_AireOS', + 'Cisco::WLC_4400' => 'Cisco::Cisco_WLC_AireOS', + 'Cisco::WLC_5500' => 'Cisco::Cisco_WLC_AireOS', +); + +my $cs = pf::IniFiles->new(-file => $file, -allowempty => 1); + +my $update = 0; +for my $section ($cs->Sections()) { + my $type = $cs->val($section, 'type'); + next if !defined $type || !exists $types{$type}; + my $new_type = $types{$type}; + $cs->setval($section, 'type', $new_type); + $update |= 1; +} + +if ($update) { + $cs->RewriteConfig(); + print "All done\n"; + exit 0; +} + + +print "Nothing to be done\n"; + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. 
+ +=cut + From 68c2691379c5b83bab0ade9aa7071e1030cbebe5 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Mon, 4 Nov 2024 21:53:29 +0000 Subject: [PATCH 088/176] Fix syntax error --- lib/pf/UnifiedApi/Controller/Config/Domains.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/pf/UnifiedApi/Controller/Config/Domains.pm b/lib/pf/UnifiedApi/Controller/Config/Domains.pm index 56dd8714ec77..d46a2dfa5c0b 100644 --- a/lib/pf/UnifiedApi/Controller/Config/Domains.pm +++ b/lib/pf/UnifiedApi/Controller/Config/Domains.pm @@ -198,7 +198,7 @@ sub create { push(@real_computer_names, "$real_computer_name-$i"); } } - for (my $i = 0; $i < @real_computer_names[]; $i++) { + for (my $i = 0; $i < @real_computer_names; $i++) { $real_computer_name = $real_computer_names[$i]; my ($add_status, $add_result) = pf::domain::add_computer(" ", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); @@ -316,7 +316,7 @@ sub update { push(@real_computer_names, "$real_computer_name-$i"); } } - for (my $i = 0; $i < @real_computer_names[]; $i++) { + for (my $i = 0; $i < @real_computer_names; $i++) { $real_computer_name = $real_computer_names[$i]; if (!is_nt_hash_pattern($new_data->{machine_account_password}) && ($new_data->{machine_account_password} ne $old_item->{machine_account_password})) { my ($add_status, $add_result) = pf::domain::add_computer("-delete", $real_computer_name, $computer_password, $ad_server_ip, $ad_server_host, $dns_name, $workgroup, $ou, $bind_dn, $bind_pass); From 7d66da6c3becad1c45a74d4912c5e7e54e99d38d Mon Sep 17 00:00:00 2001 From: JeGoi <13801368+JeGoi@users.noreply.github.com> Date: Tue, 5 Nov 2024 11:09:47 -0500 Subject: [PATCH 089/176] Fix/8280 Fix the material page update since moving private repo (#8378) * Add artifact and remove pushed to git repo * Change job's name * Extract file from docker container * Fix create directory to extract material.html file --- .gitlab-ci.yml | 
8 ++++++-- ci/lib/build/generate-material.sh | 6 ++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0d5399c60a60..4c940c8baa51 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -389,6 +389,10 @@ variables: DST_FILE: layouts/partials/about/material.html script: - ${BUILDDIR}/generate-material.sh + artifacts: + expire_in: 1 day + paths: + - result/material.html tags: - shell @@ -1149,14 +1153,14 @@ build_artifacts_website_release: - .release_only_rules # build_artificats_material job for development -material_devel_and_branches: +build_artifacts_material_devel_and_branches: image: ${PFBUILD_DEB_IMG}:${PFBUILD_DEFAULT_DEV_TAG} extends: - .build_artifacts_material_job - .build_artifacts_material_devel_and_branches_rules # build_artificats_material job for release -material_release: +build_artifacts_material_release: image: ${PFBUILD_DEB_IMG}:${CI_COMMIT_TAG} extends: - .build_artifacts_material_job diff --git a/ci/lib/build/generate-material.sh b/ci/lib/build/generate-material.sh index 3d215385c5fb..ea5d0fbf5073 100755 --- a/ci/lib/build/generate-material.sh +++ b/ci/lib/build/generate-material.sh @@ -38,6 +38,7 @@ generate_material() { make -C ${PF_SRC_DIR} configurations make -C ${PF_SRC_DIR} conf/unified_api_system_pass make -C ${PF_SRC_DIR} conf/local_secret + mkdir -p ${PF_SRC_DIR}/result echo "Starting ${CONTAINER_NAME} container" docker run --detach --name=${CONTAINER_NAME} --rm -e PFCONFIG_PROTO=unix \ @@ -51,6 +52,7 @@ generate_material() { -v ${PF_SRC_DIR}/ci/lib:/usr/local/pf/ci/lib \ -v ${PF_SRC_DIR}/config.mk:/usr/local/pf/config.mk \ -v ${PF_SRC_DIR}/Makefile:/usr/local/pf/Makefile \ + -v ${PF_SRC_DIR}/result:/usr/local/pf/result \ ghcr.io/inverse-inc/packetfence/pfconfig:${IMAGE_TAG} echo "Let some time to container to start" @@ -59,8 +61,8 @@ generate_material() { echo "Generating material.html file" docker exec ${CONTAINER_NAME} /usr/bin/make material - echo "Publishing material.html to git if 
necessary" - docker exec ${CONTAINER_NAME} /usr/local/pf/ci/lib/release/publish-to-git.sh ${SRC_FILE} ${DST_FILE} + #echo "Publishing material.html to git if necessary" + #docker exec ${CONTAINER_NAME} /usr/local/pf/ci/lib/release/publish-to-git.sh ${SRC_FILE} ${DST_FILE} } cleanup() { From 587c662614b3fc7ffffadda7183d36588a3eee21 Mon Sep 17 00:00:00 2001 From: stgmsa Date: Tue, 5 Nov 2024 11:15:45 -0500 Subject: [PATCH 090/176] return backend api errors to ntlm auth wrapper (#8357) --- src/ntlm_auth_wrap.c | 44 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/src/ntlm_auth_wrap.c b/src/ntlm_auth_wrap.c index a7832594e779..0a831599f2d7 100644 --- a/src/ntlm_auth_wrap.c +++ b/src/ntlm_auth_wrap.c @@ -45,6 +45,13 @@ The process is meant to be very short lived and never reused. */ const char *argp_program_version = "ntlm_auth_wrapper 1.0"; const char *argp_program_bug_address = ""; +const int exit_code_no_error = 0; +const int exit_code_general_error = 1; +const int exit_code_network_error = 2; +const int exit_code_auth_failed = 3; +const int exit_code_api_error = 4; +const int exit_code_invalid_input = 5; + /* Program documentation. 
*/ static char doc[] = @@ -334,19 +341,23 @@ char **argv, **envp; // keep the same values we used before, so SIGTERM = timeout, other non-zero values = auth error int status = 0; + int exit_code = exit_code_no_error; - if (strcmp(arguments.api_host, "") ==0 || strcmp(arguments.api_port, "0") == 0) { + if (strcmp(arguments.api_host, "") == 0 || strcmp(arguments.api_port, "0") == 0) { fprintf(stderr, "Error: missing NTLM auth API host or port settings.\n"); fprintf(stderr, "This could happen if you previously manually joined this server to Windows AD.\n"); fprintf(stderr, "If this is the case, you need to go to the admin UI, re-create the domain configuration.\n"); - exit(1); + + exit(exit_code_invalid_input); } cJSON *json = cJSON_CreateObject(); if (json == NULL) { fprintf(stderr, "Error: could not create JSON object. Exiting."); - exit(1); + + exit(exit_code_general_error); } + for (int i = 1; i < argc; i++) { if (strncmp(argv[i], "--username=", strlen("--username=")) == 0) { cJSON_AddStringToObject(json, "username", argv[i] + strlen("--username=")); @@ -393,23 +404,38 @@ char **argv, **envp; cURLCode = curl_easy_perform(curl); free(uri); + if (cURLCode == CURLE_OK) { - status = 0; long http_response_code; curl_easy_getinfo(curl, CURLINFO_HTTP_CODE, &http_response_code); - if (http_response_code != 200) { + if (http_response_code == 200) { + status = 0; + exit_code = 0; + } else { status = http_response_code; // consider non-200 response as auth failures. 
+ exit_code = exit_code_general_error; + if (400 <= http_response_code && http_response_code <= 499) { + exit_code = exit_code_auth_failed; + } + if (500 <= http_response_code && http_response_code <= 599) { + exit_code = exit_code_api_error; + } } printf("%s\n", chunk.memory); } else { - status = cURLCode; + exit_code = exit_code_network_error; if (cURLCode==CURLE_OPERATION_TIMEDOUT || cURLCode == CURLE_COULDNT_RESOLVE_HOST || cURLCode == CURLE_COULDNT_CONNECT) { - status = SIGTERM; // timeout or any network errors, considered as time-outs + status = SIGTERM; // timeout / unreachable dest are considered as "network issues" (previously SIGTERM) + } else { + status = cURLCode; } fprintf(stderr, "exec curl failed: %s\n", curl_easy_strerror(cURLCode)); } curl_slist_free_all(headers); curl_easy_cleanup(curl); + } else { + exit_code = exit_code_general_error; + fprintf(stderr, "Unable to initialize curl object."); } free(chunk.memory); free(json_string); @@ -423,7 +449,5 @@ char **argv, **envp; if (!arguments.nostatsd) send_statsd(arguments, status, elapsed); - if (status != 0) { - exit(1); - } + exit(exit_code); } From 209dbcb5fb86f794e69c497fbd3a24ff118ef4c8 Mon Sep 17 00:00:00 2001 From: E-ThanG Date: Tue, 5 Nov 2024 14:08:02 -0800 Subject: [PATCH 091/176] Parse RADIUS request attributes during Redis cache extraction Added parsing of WLAN-AKM-Suite, WLAN-Group-Cipher, WLAN-Pairwise-Cipher, TLS-Cert-Expiration, TLS-Cert-Valid-Since, TLS-Client-Cert-Expiration, and TLS-Client-Cert-Valid-Since attributes. WLAN AKM and Cipher Suite attributes are decoded and filled with the name of the AKM or suite used. Dates are presented in a human-readable format with 4-digit year. Existing behavior with WLAN attributes is that the integers are erroneously unmarshaled as Float64 by Golang json.Unmarshal. Dates are presented as a string with 2-digit year and no field delimiters or spacing ("241025235210Z"). 
--- go/cron/flush_radius_audit_log_job.go | 167 ++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) diff --git a/go/cron/flush_radius_audit_log_job.go b/go/cron/flush_radius_audit_log_job.go index e462f3e7a7f8..1b9b24587175 100644 --- a/go/cron/flush_radius_audit_log_job.go +++ b/go/cron/flush_radius_audit_log_job.go @@ -191,6 +191,7 @@ func (j *FlushRadiusAuditLogJob) argsFromEntry(entry []interface{}) []interface{ request = entry[1].(map[string]interface{}) reply = entry[2].(map[string]interface{}) control = entry[3].(map[string]interface{}) + request = parseRequestArgs(request) args[2] = formatRequestValue(request["PacketFence-Computer-Name"], "N/A") args[0] = formatRequestValue(request["Calling-Station-Id"], "N/A") args[1] = formatRequestValue(request["Framed-IP-Address"], "N/A") @@ -340,3 +341,169 @@ func interfaceToStr(i interface{}, defaultStr string) string { return defaultStr } + +func parseRequestArgs(request map[string]interface{}) map[string]interface{} { + if val, ok := request["WLAN-AKM-Suite"].(float64); ok { + request["WLAN-AKM-Suite"] = mapAKMSuite(int(val)) + } + if val, ok := request["WLAN-Group-Cipher"].(float64); ok { + request["WLAN-Group-Cipher"] = mapCipherSuite(int(val)) + } + if val, ok := request["WLAN-Pairwise-Cipher"].(float64); ok { + request["WLAN-Pairwise-Cipher"] = mapCipherSuite(int(val)) + } + if val, ok := request["TLS-Cert-Expiration"].(string); ok { + request["TLS-Cert-Expiration"] = formatDate(val) + } + if val, ok := request["TLS-Cert-Valid-Since"].(string); ok { + request["TLS-Cert-Valid-Since"] = formatDate(val) + } + if val, ok := request["TLS-Client-Cert-Expiration"].(string); ok { + request["TLS-Client-Cert-Expiration"] = formatDate(val) + } + if val, ok := request["TLS-Client-Cert-Valid-Since"].(string); ok { + request["TLS-Client-Cert-Valid-Since"] = formatDate(val) + } + return request +} + +type AKMSuite int + +const ( + AKMReserved AKMSuite = iota // 0 - Reserved + IEEE8021X // 1 - 802.1X + PSK // 2 - PSK + 
FT_8021X // 3 - FT over 802.1X + FT_PSK // 4 - FT over PSK + WPA_8021X // 5 - WPA with 802.1X + WPA_PSK // 6 - WPA with PSK + OWE // 7 - OWE + OWE_Transition // 8 - OWE Transition Mode + SAE // 9 - Simultaneous Authentication of Equals + FT_SAE // 10 - FT over SAE + FILS_SHA256 // 11 - FILS-SHA256 + FILS_SHA384 // 12 - FILS-SHA384 + FT_FILS_SHA256 // 13 - FT over FILS-SHA256 + FT_FILS_SHA384 // 14 - FT over FILS-SHA384 + OWE_transition_mode // 15 - OWE transition mode +) + +type CipherSuite int + +const ( + CipherReserved CipherSuite = iota // 0 - Reserved + WEP40 // 1 - WEP-40 + TKIP // 2 - TKIP + CipherReserved3 // 3 - Reserved + CCMP128 // 4 - CCMP-128 + WEP104 // 5 - WEP-104 + BIPCMAC128 // 6 - BIP-CMAC-128 + GCMP128 // 7 - GCMP-128 + GCMP256 // 8 - GCMP-256 + CCMP256 // 9 - CCMP-256 + BIPGMAC128 // 10 - BIP-GMAC-128 + BIPGMAC256 // 11 - BIP-GMAC-256 + SMS4 // 12 - SMS4 + CKIP128 // 13 - CKIP-128 + CKIP128_PMK // 14 - CKIP-128 with PMK caching + CipherReserved15 // 15 - Reserved +) + +func(c CipherSuite) String() string { + switch c { + case WEP40: + return "WEP-40" + case TKIP: + return "TKIP" + case CCMP128: + return "CCMP-128" + case WEP104: + return "WEP-104" + case GCMP128: + return "GCMP-128" + case GCMP256: + return "GCMP-256" + case CCMP256: + return "CCMP-256" + case BIPCMAC128: + return "BIP-CMAC-128" + case BIPGMAC128: + return "BIP-GMAC-128" + case BIPGMAC256: + return "BIP-GMAC-256" + case SMS4: + return "SMS4" + case CKIP128: + return "CKIP-128" + case CKIP128_PMK: + return "CKIP-128 with PMK caching" + case CipherReserved3, CipherReserved15: + return "Reserved" + default: + return fmt.Sprintf("Unknown cipher suite (Value: %d)", c) + } +} + +func(a AKMSuite) String() string { + switch a { + case IEEE8021X: + return "802.1X" + case PSK: + return "PSK" + case FT_8021X: + return "FT over 802.1X" + case FT_PSK: + return "FT over PSK" + case WPA_8021X: + return "WPA with 802.1X" + case WPA_PSK: + return "WPA with PSK" + case OWE: + return "OWE" + case 
OWE_Transition: + return "OWE Transition Mode" + case SAE: + return "SAE" + case FT_SAE: + return "FT over SAE" + case FILS_SHA256: + return "FILS-SHA256" + case FILS_SHA384: + return "FILS-SHA384" + case FT_FILS_SHA256: + return "FT over FILS-SHA256" + case FT_FILS_SHA384: + return "FT over FILS-SHA384" + case OWE_transition_mode: + return "OWE transition mode" + default: + return fmt.Sprintf("Unknown or Reserved AKM suite (Value: %d)", a) + } +} + +func mapAKMSuite(akmSuiteInt int) string { + akmSuiteSelector: = akmSuiteInt & 0x000000FF + return AKMSuite(akmSuiteSelector).String() +} + +func mapCipherSuite(cipherSuiteInt int) string { + cipherSuiteSelector: = cipherSuiteInt & 0x000000FF + return CipherSuite(cipherSuiteSelector).String() +} + +func formatDate(dateStr string) string { + const dateFormat2Digit = "060102150405Z" + const dateFormat4Digit = "20060102150405Z" + + var t time.Time + var err error + t, err = time.Parse(dateFormat2Digit, dateStr) + if err != nil { + t, err = time.Parse(dateFormat4Digit, dateStr) + if err != nil { + return dateStr // Return the original string if parsing fails + } + } + + return t.Format("2006-01-02 15:04:05") +} \ No newline at end of file From 32d97cc706ba346f990096b2dcbaaad4f23ac344 Mon Sep 17 00:00:00 2001 From: E-ThanG <19691760+E-ThanG@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:20:47 -0800 Subject: [PATCH 092/176] Update useOpenLdap.js (#8366) Changed to case-insensitive attribute name indices, set base_dn for getSubSchemaDN to null, and added explicit limits to sendLdapSearchRequest function calls. 
--- .../_components/ldapCondition/useOpenLdap.js | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useOpenLdap.js b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useOpenLdap.js index faae79176bd7..0d290e18fd5c 100644 --- a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useOpenLdap.js +++ b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useOpenLdap.js @@ -10,7 +10,7 @@ import { function useOpenLdap(form) { const performSearch = (filter, scope, attributes, base_dn) => { - return sendLdapSearchRequest({...form.value}, filter, scope, attributes, base_dn) + return sendLdapSearchRequest({...form.value}, filter, scope, attributes, base_dn, 1000) .then((result) => { return {results: parseLdapResponseToAttributeArray(result, extractAttributeFromFilter(filter)), success: true} } @@ -18,26 +18,37 @@ function useOpenLdap(form) { } const getSubSchemaDN = () => { - return sendLdapSearchRequest({...form.value}, null, 'base', ['subSchemaSubEntry'], form.value.basedn) + return sendLdapSearchRequest({...form.value}, null, 'base', ['subSchemaSubEntry'], '', 1) .then((response) => { - let firstAttribute = response[Object.keys(response)[0]] - return firstAttribute['subschemaSubentry'] + const keys = Object.keys(response) + if (keys.length) { + const firstAttribute = response[keys[0]] + const lowerCaseKeys = Object.keys(firstAttribute).map(key => key.toLowerCase()) + const subSchemaSubEntryIndex = lowerCaseKeys.indexOf('subschemasubentry') + if (subSchemaSubEntryIndex !== -1) { + const subSchemaSubEntryKey = Object.keys(firstAttribute)[subSchemaSubEntryIndex] + return firstAttribute[subSchemaSubEntryKey] + } + } + return [] }) } const fetchAttributeTypes = (subSchemaDN) => { - return sendLdapSearchRequest({...form.value}, '(objectclass=subschema)', - 'base', - 
['attributeTypes'], - subSchemaDN) + return sendLdapSearchRequest({...form.value}, '(objectClass=subSchema)', 'base', ['attributeTypes'], subSchemaDN, 1000) .then((response) => { const keys = Object.keys(response) if (keys.length) { - const { attributeTypes } = response[keys[0]] - return attributeTypes + const firstAttribute = response[keys[0]] + const lowerCaseKeys = Object.keys(firstAttribute).map(key => key.toLowerCase()) + const attributeTypesIndex = lowerCaseKeys.indexOf('attributetypes') + if (attributeTypesIndex !== -1) { + const attributeTypesKey = Object.keys(firstAttribute)[attributeTypesIndex] + return firstAttribute[attributeTypesKey] + } } return [] - }) + }) } const getAttributes = () => { From 2b27dbeab63e0adc03c66afb1ca9edb727b965db Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Wed, 6 Nov 2024 20:23:06 +0000 Subject: [PATCH 093/176] Change useAdLdap.js to case-insensitive attribute name indices, fixes #8366 --- .../_components/ldapCondition/useAdLdap.js | 31 +++++++++++++------ 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useAdLdap.js b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useAdLdap.js index 4bf2e7e1442e..59d002a36305 100644 --- a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useAdLdap.js +++ b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useAdLdap.js @@ -36,23 +36,34 @@ function useAdLdap(form) { } const getSubSchemaDN = () => { - return sendLdapSearchRequest({...form.value}, null, 'base', ['subschemaSubentry'], '') - .then((result) => { - let firstAttribute = result[Object.keys(result)[0]] - return firstAttribute['subSchemaSubEntry'] + return sendLdapSearchRequest({...form.value}, null, 'base', ['subschemaSubentry'], '', 1) + .then((response) => { + const keys = Object.keys(response) + if (keys.length) { + const firstAttribute = 
response[keys[0]] + const lowerCaseKeys = Object.keys(firstAttribute).map(key => key.toLowerCase()) + const subSchemaSubEntryIndex = lowerCaseKeys.indexOf('subschemasubentry') + if (subSchemaSubEntryIndex !== -1) { + const subSchemaSubEntryKey = Object.keys(firstAttribute)[subSchemaSubEntryIndex] + return firstAttribute[subSchemaSubEntryKey] + } + } + return [] }) } const fetchAttributeTypes = (subSchemaDN) => { - return sendLdapSearchRequest({...form.value}, '(objectclass=subschema)', - 'base', - ['attributetypes'], - subSchemaDN) + return sendLdapSearchRequest({...form.value}, '(objectClass=subSchema)', 'base', ['attributeTypes'], subSchemaDN, 1000) .then((response) => { const keys = Object.keys(response) if (keys.length) { - const { attributeTypes } = response[keys[0]] - return attributeTypes + const firstAttribute = response[keys[0]] + const lowerCaseKeys = Object.keys(firstAttribute).map(key => key.toLowerCase()) + const attributeTypesIndex = lowerCaseKeys.indexOf('attributetypes') + if (attributeTypesIndex !== -1) { + const attributeTypesKey = Object.keys(firstAttribute)[attributeTypesIndex] + return firstAttribute[attributeTypesKey] + } } return [] }) From e6435e82959597cf702b9cead67d53dfb94e63bf Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Thu, 31 Oct 2024 20:26:44 +0000 Subject: [PATCH 094/176] allow custom attributes in LDAP Explorer, fixes #8367 --- .../sources/_components/LdapRuleCondition.vue | 2 ++ .../ldapCondition/LdapAttributeSelector.vue | 14 +++++++++----- .../ldapCondition/MultiselectFacade.vue | 8 +++++++- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/html/pfappserver/root/src/views/Configuration/sources/_components/LdapRuleCondition.vue b/html/pfappserver/root/src/views/Configuration/sources/_components/LdapRuleCondition.vue index 80f9eebb6bdf..a9776bd7ce19 100644 --- a/html/pfappserver/root/src/views/Configuration/sources/_components/LdapRuleCondition.vue +++ 
b/html/pfappserver/root/src/views/Configuration/sources/_components/LdapRuleCondition.vue @@ -2,6 +2,7 @@
diff --git a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapAttributeSelector.vue b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapAttributeSelector.vue index d2bee1455636..8be4def1cce2 100644 --- a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapAttributeSelector.vue +++ b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapAttributeSelector.vue @@ -4,9 +4,11 @@ :value="inputValue" :label="text" :isLoading="isLoading" + :taggable="taggable" :track-by="text" :single-label="singleLabel" :on-select="onSelect" + :on-tag="onTag" :on-remove="onRemove" :on-open="onOpen" :on-close="onClose" @@ -54,10 +56,7 @@ function setup(props, _) { // eslint-disable-line const isFocused = ref(false) const isLoading = computed(() => inject(ProvidedKeys.LdapAttributesLoading).value) const isConnected = computed(() => inject(ProvidedKeys.connectedToLdap).value) - const allOptions = computed(() => { - if (!isConnected.value) { - return [] - } + const inputOptions = computed(() => { return moveSelectionToTop(inject(ProvidedKeys.LdapAttributes).value.map(valueToSelectValue)) }) const isDisabled = inject('isLoading') @@ -109,6 +108,10 @@ function setup(props, _) { // eslint-disable-line validateChoice() } + function onTag(value) { + onSelect(valueToSelectValue(value)) + } + function onOpen() { isFocused.value = true } @@ -124,12 +127,13 @@ function setup(props, _) { // eslint-disable-line validateChoice() return { - inputOptions: allOptions, + inputOptions, isDisabled, isFocused, isLoading, isConnected, onSelect, + onTag, onOpen, onClose, onRemove, diff --git a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/MultiselectFacade.vue b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/MultiselectFacade.vue index a5c8cd2b9578..67e27a090138 100644 --- 
a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/MultiselectFacade.vue +++ b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/MultiselectFacade.vue @@ -25,10 +25,12 @@ :selected-label="selectedLabel" :show-labels="showLabels" :show-pointer="showPointer" + :taggable="taggable" :track-by="trackBy" :value="value" @search-change="onSearch" @select="onSelect" + @tag="onTag" @open="onOpen" @remove="onRemove" @close="onClose" @@ -105,7 +107,6 @@ export const props = { validator: value => ['sm', 'md', 'lg'].includes(value) }, - onSearch: { type: Function, default: () => {} @@ -116,6 +117,11 @@ export const props = { default: () => {} }, + onTag: { + type: Function, + default: () => {} + }, + onOpen: { type: Function, default: () => {} From efa30dbce0040c95b7eb528a2bea8e817aca77fe Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Mon, 4 Nov 2024 21:29:17 +0000 Subject: [PATCH 095/176] make value taggable --- .../sources/_components/ldapCondition/LdapSearchInput.vue | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapSearchInput.vue b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapSearchInput.vue index 789bc803c342..181e31f0bafc 100644 --- a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapSearchInput.vue +++ b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/LdapSearchInput.vue @@ -27,9 +27,11 @@ :options="inputOptions" :value="inputValue" :label="text" + :taggable="taggable" :track-by="text" :single-label="singleLabel" :on-select="onSelect" + :on-tag="onTag" :on-open="onOpen" :on-remove="onRemove" :on-close="onClose" @@ -179,6 +181,10 @@ function setup(props, context) { // eslint-disable-line } } + function onTag(value) { + onSelect(valueToSelectValue(value)) + } + function onOpen() { if (selectedValue.value !== null) 
{ inputOptions.value = [selectedValue.value] @@ -206,6 +212,7 @@ function setup(props, context) { // eslint-disable-line isConnected, onSearch, onSelect, + onTag, onOpen, onClose, onRemove, From 25dfae5ff124471c4438783955f0afdce2872dcd Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Wed, 6 Nov 2024 20:42:09 +0000 Subject: [PATCH 096/176] update NEWS.asciidoc --- NEWS.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index b89d97123774..278e4d5d9668 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -36,6 +36,8 @@ For a list of compatibility related changes see the < Date: Thu, 7 Nov 2024 19:58:15 +0000 Subject: [PATCH 097/176] Only show registered nodes --- .../PacketFence/Controller/Status.pm | 2 +- lib/pf/person.pm | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm b/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm index 8d3975ff453f..3d0061715b62 100644 --- a/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm +++ b/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm @@ -93,7 +93,7 @@ sub is_lost_stolen { sub userIsAuthenticated : Private { my ( $self, $c ) = @_; my $pid = $c->user_session->{"username"} // $c->{_session}->{username}; - my @person_nodes = person_nodes($pid); + my @person_nodes = person_reg_nodes($pid); my @nodes; foreach my $person_node (@person_nodes) { my $node = node_view($person_node->{mac}); diff --git a/lib/pf/person.pm b/lib/pf/person.pm index c640211fe30a..0176d814b957 100644 --- a/lib/pf/person.pm +++ b/lib/pf/person.pm @@ -37,6 +37,7 @@ BEGIN { person_view_simple person_modify person_nodes + person_reg_nodes person_security_events person_cleanup persons_without_nodes @@ -282,6 +283,27 @@ sub person_nodes { return @{$iter->all // []}; } +sub person_reg_nodes { + my ($pid) = @_; + my ($status, $iter) = pf::dal::node->search( + 
-where => { + pid => $pid, + }, + -columns => [qw(mac pid notes regdate unregdate status user_agent computername device_class time_balance bandwidth_balance)], + #To avoid join + -from => pf::dal::node->table, + -with_class => undef, + -where => { + status => "reg", + }, + ); + if (is_error($status)) { + return; + } + + return @{$iter->all // []}; +} + =head2 person_unassign_nodes unassign the nodes of a person From 4e031182f2ba66693653da3cf81a4382fc21955f Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Fri, 8 Nov 2024 14:35:30 +0000 Subject: [PATCH 098/176] Remove unreliable test --- t/unittest/ssl.t | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/t/unittest/ssl.t b/t/unittest/ssl.t index 3fad978286ac..73956f1109d0 100755 --- a/t/unittest/ssl.t +++ b/t/unittest/ssl.t @@ -22,7 +22,7 @@ BEGIN { use setup_test_config; } -use Test::More tests => 22; +use Test::More tests => 20; use Test::NoWarnings; use pf::constants qw($TRUE $FALSE); @@ -211,13 +211,6 @@ is(ref($x509), "Crypt::OpenSSL::X509", "x509_from_string returns a Crypt::OpenSS is($x509->subject, "CN=packetfence.org", "certificate has the right subject"); -{ - my ($res, $inter) = pf::ssl::fetch_all_intermediates($x509); - is(scalar(@$inter), 1, "right amount of intermediates was found"); - is($inter->[0]->subject(), "C=US, O=Let's Encrypt, CN=Let's Encrypt Authority X3", "right intermediate subject was found"); -} - - my $cert_with_chain_cert = < Date: Fri, 8 Nov 2024 11:03:33 -0500 Subject: [PATCH 099/176] Fix import from previous PF versions --- addons/full-import/import.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/full-import/import.sh b/addons/full-import/import.sh index b015e3679a3f..d6ed1ec89723 100755 --- a/addons/full-import/import.sh +++ b/addons/full-import/import.sh @@ -40,7 +40,7 @@ prepare_import() { ls -l | grep -v export.tgz main_splitter - files_dump=`ls packetfence-conf-*` + files_dump=`ls packetfence-conf-* || ls packetfence-files-*` 
echo "Found files dump '$files_dump'" echo "Extracting files dump" From 4c7a2a49ed5893ec7ca2115b498d6c30203ef25b Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Mon, 11 Nov 2024 13:42:38 +0000 Subject: [PATCH 100/176] change bypass_acls schema from TEXT to MEDIUMTEXT --- html/pfappserver/root/src/globals/mysql.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/html/pfappserver/root/src/globals/mysql.js b/html/pfappserver/root/src/globals/mysql.js index edecc33c341f..a3b7191adc31 100644 --- a/html/pfappserver/root/src/globals/mysql.js +++ b/html/pfappserver/root/src/globals/mysql.js @@ -180,7 +180,7 @@ export const MysqlDatabase = { }, bypass_acls: { type: MysqlString, - maxLength: 255, + maxLength: 16777215, default: null }, voip: { From 2f7b68cd9c69a02e061eaf7be754e7fb9ad5deb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:37:05 +0000 Subject: [PATCH 101/176] Updated the NEWS.asciidoc with a bug fix for an Aruba issue --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 278e4d5d9668..b560df77ed26 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -43,6 +43,7 @@ For a list of compatibility related changes see the < Date: Tue, 12 Nov 2024 11:04:51 -0500 Subject: [PATCH 102/176] Feature/inv-105 improve docker images (#8337) * Clean pfdebian from extra documentation, reorganize pfdebian layers * Add exclusion (container.io|docker-ce|docker-ce-cli|docker-ce-rootless-extras|gcc) in images builder * Install quietly on pfdebian container * Install quietly install pf dependencies * Clean package index on pfdebian container * Fix find and delete files * Move all cleaning in a layer, add comments * Move cleaning locales-all after all installation and in cleaning part * Use -delete in find to remove empty directories * Add --no-install-recommends during installation * Add localpurge in pfdebian * Ignore 
certificate for nodejs * Add curl recommendation dependencies * Reduce pfsetacls size * pfsetacls fix docker version * Fix install pfsetacls on alpine * Container pfsetacls separate build from production --- .../debian/install-pf-dependencies.sh | 2 +- containers/kaniko_vars | 4 +- containers/pfdebian/Dockerfile | 44 ++++++++++++------ containers/pfsetacls/Dockerfile | 46 +++++++++++++++---- 4 files changed, 70 insertions(+), 26 deletions(-) diff --git a/addons/dev-helpers/debian/install-pf-dependencies.sh b/addons/dev-helpers/debian/install-pf-dependencies.sh index 1f75bc34d625..bddcf3786a3b 100755 --- a/addons/dev-helpers/debian/install-pf-dependencies.sh +++ b/addons/dev-helpers/debian/install-pf-dependencies.sh @@ -15,7 +15,7 @@ apt-install-depends() { | sed -n \ -e "/^Inst $pkg /d" \ -e 's/^Inst \([^ ]\+\) .*$/\1/p' \ - | xargs apt-get install -y + | xargs apt-get -qq --no-install-recommends install -y } declare -p PKGS_TO_EXCLUDE diff --git a/containers/kaniko_vars b/containers/kaniko_vars index 4d8027db8b43..3d3305b083ba 100644 --- a/containers/kaniko_vars +++ b/containers/kaniko_vars @@ -4,7 +4,7 @@ export PF_VERSION=$(egrep -o '[0-9]+\.[0-9]+' $CI_PROJECT_DIR/conf/pf-release) # only used for pfdebian build -export PKGS_TO_EXCLUDE="packetfence|freeradius" +export PKGS_TO_EXCLUDE="packetfence|freeradius|container.io|docker-ce|docker-ce-cli|docker-ce-rootless-extras|gcc" # variables to pass during build -DOCKFILE_VARS='PF_VERSION KNK_REGISTRY_URL IMAGE_TAG FINGERBANK_BUILD_API_KEY BUILD_PFAPPSERVER_VUE PKGS_TO_EXCLUDE' \ No newline at end of file +DOCKFILE_VARS='PF_VERSION KNK_REGISTRY_URL IMAGE_TAG FINGERBANK_BUILD_API_KEY BUILD_PFAPPSERVER_VUE PKGS_TO_EXCLUDE' diff --git a/containers/pfdebian/Dockerfile b/containers/pfdebian/Dockerfile index 5b45e80f0bca..d11fe123d7d7 100644 --- a/containers/pfdebian/Dockerfile +++ b/containers/pfdebian/Dockerfile @@ -1,6 +1,12 @@ FROM debian:12 -RUN apt-get update && apt-get install -y aptitude wget gnupg +# do not want 
docs +RUN printf 'path-exclude /usr/share/doc/*\npath-include /usr/share/doc/*/copyright\npath-exclude /usr/share/man/*\npath-exclude /usr/share/groff/*\npath-exclude /usr/share/info/*\npath-exclude /usr/share/lintian/*\npath-exclude /usr/share/linda/*' > /etc/dpkg/dpkg.cfg.d/01_nodoc + +# Prepare container with deps +RUN apt-get -qq update && \ + apt-get -qq --no-install-recommends install -y aptitude wget gnupg sudo && \ + apt-get -qq install -y curl RUN /bin/bash -c "echo 'exit 0' > /usr/bin/systemctl" RUN /bin/bash -c "echo 'exit 0' > /bin/systemctl" @@ -12,20 +18,31 @@ ARG PKGS_TO_EXCLUDE # This is to ensure a cache miss when there is a change in the dependencies COPY debian/control /tmp/ COPY rpm/packetfence.spec /tmp/ +COPY addons/dev-helpers/debian/install-pf-dependencies.sh /usr/local/pf/addons/dev-helpers/debian/install-pf-dependencies.sh -RUN /bin/bash -c "echo 'deb http://inverse.ca/downloads/PacketFence/debian/${PF_VERSION} bookworm bookworm' > /etc/apt/sources.list.d/packetfence_deps.list" && \ - wget -q -O - https://inverse.ca/downloads/GPG_PUBLIC_KEY | apt-key add - - -RUN /bin/bash -c "echo 'deb https://deb.nodesource.com/node_20.x nodistro main' > /etc/apt/sources.list.d/nodejs.list" && \ - wget -q -O - https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | apt-key add - - -RUN apt-get update +# Prepare Repos +RUN curl -fsSL https://inverse.ca/downloads/GPG_PUBLIC_KEY | gpg --dearmor -o /etc/apt/keyrings/packetfence.gpg && \ + /bin/bash -c "echo 'deb [signed-by=/etc/apt/keyrings/packetfence.gpg] http://inverse.ca/downloads/PacketFence/debian/${PF_VERSION} bookworm bookworm' > /etc/apt/sources.list.d/packetfence_deps.list" && \ + curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource-repo.gpg && \ + /bin/bash -c "echo 'deb [signed-by=/etc/apt/keyrings/nodesource-repo.gpg] https://deb.nodesource.com/node_20.x nodistro main' > /etc/apt/sources.list.d/nodejs.list" && \ + apt-get -qq 
update -COPY addons/dev-helpers/debian/install-pf-dependencies.sh /usr/local/pf/addons/dev-helpers/debian/install-pf-dependencies.sh +# Install Deps RUN /usr/local/pf/addons/dev-helpers/debian/install-pf-dependencies.sh && \ - rm -f /usr/local/fingerbank/db/fingerbank_*.db - -RUN apt-get install -y freeradius-common + rm -f /usr/local/fingerbank/db/fingerbank_*.db && \ + apt-get -qq --no-install-recommends install -y freeradius-common && \ + apt-get -qq --no-install-recommends -y install localepurge && \ + printf '#USE_DPKG\nMANDELETE\nDONTBOTHERNEWLOCALE\n#SHOWFREEDSPACE\n#QUICKNDIRTYCALC\n#VERBOSE\nen' > /etc/locale.nopurge && \ + /usr/sbin/localepurge + +# Clean +RUN apt-get -qq remove --purge -y locales-all localepurge && \ + apt-get clean && \ + rm -rf /etc/locale.nopurge && \ + find /usr/share/doc -depth -type f ! -name copyright -exec rm {} \; && \ + find /usr/share/doc -type d -empty -delete && \ + rm -rf /usr/share/groff/* /usr/share/info/* && \ + rm -rf /usr/share/lintian/* /usr/share/linda/* /var/cache/man/* RUN useradd -U -r -d "/usr/local/pf" -s /bin/sh -c "PacketFence" -M pf @@ -33,6 +50,3 @@ RUN mkdir -p /usr/local/pf/lib/ && \ ln -s /usr/local/fingerbank/lib/fingerbank /usr/local/pf/lib/fingerbank RUN chown -R pf: /usr/local/pf - -# To be removed -RUN apt-get install -y libcisco-accesslist-parser-perl libparse-eyapp-perl diff --git a/containers/pfsetacls/Dockerfile b/containers/pfsetacls/Dockerfile index 48021c43bed8..10597e6cbe8a 100644 --- a/containers/pfsetacls/Dockerfile +++ b/containers/pfsetacls/Dockerfile @@ -1,5 +1,4 @@ - -FROM golang:1.23.1-bookworm +FROM golang:1.23.1-bookworm AS build ENV SEMAPHORE_VERSION="development" SEMAPHORE_ARCH="linux_amd64" \ SEMAPHORE_CONFIG_PATH="${SEMAPHORE_CONFIG_PATH:-/etc/semaphore}" \ @@ -8,9 +7,8 @@ ENV SEMAPHORE_VERSION="development" SEMAPHORE_ARCH="linux_amd64" \ # hadolint ignore=DL3013 RUN curl -fsSL https://deb.nodesource.com/setup_16.x | bash - -RUN apt update && apt install -y gcc g++ make git 
mariadb-client python3 pip python3-openssl openssl ca-certificates curl libcurl4-openssl-dev openssh-client tini nodejs bash rsync && \ - apt install -y python3-dev libffi-dev python3-paramiko &&\ - rm -rf /var/cache/apt/* +RUN apt update && apt install -y gcc g++ make git mariadb-client python3 pip python3-openssl openssl ca-certificates curl libcurl4-openssl-dev openssh-client tini nodejs bash rsync python3-dev libffi-dev python3-paramiko &&\ + apt-get clean RUN VER=`python3 -c 'import sys; val=sys.version_info;print(str(val.major)+"."+str(val.minor))'` ; \ rm -rf /usr/lib/python$VER/EXTERNALLY-MANAGED && \ @@ -31,8 +29,7 @@ RUN adduser --disabled-password -u 1002 --gecos 0 semaphore && \ RUN cd $(go env GOPATH) && curl -sL https://taskfile.dev/install.sh | sh -s -- "v3.33.0" - -RUN npm install -g npm@9.6.7 +RUN npm install --omit=dev -g npm@9.6.7 RUN git config --global --add safe.directory /go/src/github.com/ansible-semaphore/semaphore @@ -45,7 +42,40 @@ RUN git clone -qq --depth 1 --single-branch --branch ${release} ${source} ./ RUN deployment/docker/ci/bin/install +# Prepapre the VM +FROM debian:12 + +COPY --from=build /usr/lib /usr/lib +COPY --from=build /usr/local/lib /usr/local/lib +COPY --from=build /usr/local/include /usr/local/include +COPY --from=build /usr/local/bin /usr/local/bin +COPY --from=build /var/lib/semaphore /var/lib/semaphore +COPY --from=build /go/src/github.com/ansible-semaphore/semaphore/bin/semaphore /usr/local/bin/semaphore/bin/semaphore +COPY --from=build /go/src/github.com/ansible-semaphore/semaphore/LICENSE /usr/local/bin/semaphore/LICENSE + +RUN apt -q update && \ + apt -q -y install curl && \ + curl -fsSL https://deb.nodesource.com/setup_16.x | bash - + +RUN apt update && \ + apt install -y -q --no-install-recommends git mariadb-client python3 pip python3-openssl openssl ca-certificates libcurl4-openssl-dev openssh-client tini nodejs bash rsync python3-dev libffi-dev python3-paramiko &&\ + apt-get clean + +RUN VER=`python3 -c 
'import sys; val=sys.version_info;print(str(val.major)+"."+str(val.minor))'` ; \ + rm -rf /usr/lib/python$VER/EXTERNALLY-MANAGED && \ + pip3 install --upgrade pip cffi &&\ + pip3 install ansible && \ + pip3 install ansible-pylibssh + +RUN adduser --disabled-password -u 1002 --gecos 0 semaphore && \ + mkdir -p /tmp/semaphore && \ + mkdir -p /etc/semaphore && \ + chown -R semaphore:0 /tmp/semaphore && \ + chown -R semaphore:0 /etc/semaphore && \ + chown -R semaphore:0 /var/lib/semaphore && \ + chown -R semaphore:0 /usr/local/bin/semaphore + USER semaphore EXPOSE 3000 ENTRYPOINT ["/usr/local/bin/semaphore-wrapper"] -CMD ["./bin/semaphore", "server", "--config", "/etc/semaphore/config.json"] +CMD ["/usr/local/bin/semaphore/bin/semaphore", "server", "--config", "/etc/semaphore/config.json"] From be1dd6331849fcddf50236d8ce2f4f5e734d5e13 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 13 Nov 2024 09:15:23 -0500 Subject: [PATCH 103/176] Added log on create certificate --- go/plugin/caddy2/pfpki/models/models.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/plugin/caddy2/pfpki/models/models.go b/go/plugin/caddy2/pfpki/models/models.go index aeb00ed1a9e4..cfe0b350d842 100644 --- a/go/plugin/caddy2/pfpki/models/models.go +++ b/go/plugin/caddy2/pfpki/models/models.go @@ -1304,7 +1304,7 @@ func (c Cert) New() (types.Info, error) { Information.Status = http.StatusConflict return Information, errors.New(dbError) } - + log.LoggerWContext(c.Ctx).Info("Certificate " + c.Cn + " has been generated from profile " + prof.Name + " and sign by " + prof.Ca.Cn) c.DB.Select("id, cn, mail, street_address, organisation, organisational_unit, country, state, locality, postal_code, cert, profile_id, profile_name, ca_name, ca_id, valid_until, serial_number, dns_names, ip_addresses").Where("cn = ? 
AND profile_name = ?", c.Cn, prof.Name).First(&newcertdb) Information.Entries = newcertdb Information.Serial = SerialNumber.String() From ac76ca0df3f22080beea9979817387a1ea477b63 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Wed, 13 Nov 2024 18:21:05 +0000 Subject: [PATCH 104/176] add expected response in service async actions --- .../root/src/store/modules/cluster.js | 33 +++--------- .../root/src/store/modules/pfqueue.js | 52 +++++++++++-------- 2 files changed, 37 insertions(+), 48 deletions(-) diff --git a/html/pfappserver/root/src/store/modules/cluster.js b/html/pfappserver/root/src/store/modules/cluster.js index 26a583addeb7..0178eb4246ff 100644 --- a/html/pfappserver/root/src/store/modules/cluster.js +++ b/html/pfappserver/root/src/store/modules/cluster.js @@ -65,45 +65,24 @@ const api = (state, server = store.state.system.hostname) => { return apiCall.postQuiet(['service', id, 'restart'], { async: true }, { headers }) .then(response => { const { data: { task_id } = {} } = response - return store.dispatch('pfqueue/pollTaskStatus', { task_id, headers }).then(response => { - const { restart } = response - if (parseInt(restart) > 0) { - return response - } - else { - throw new Error(i18n.t(`Could not restart {id} on {server}.`, { server, id })) - } - }) + const expect = ({ item: { restart = 0 } = {} }) => (+restart > 0) + return store.dispatch('pfqueue/pollTaskStatus', { task_id, headers, expect }) }) }, start: id => { return apiCall.postQuiet(['service', id, 'start'], { async: true }, { headers }) .then(response => { const { data: { task_id } = {} } = response - return store.dispatch('pfqueue/pollTaskStatus', { task_id, headers }).then(response => { - const { start } = response - if (parseInt(start) > 0) { - return response - } - else { - throw new Error(i18n.t(`Could not start {id} on {server}.`, { server, id })) - } - }) + const expect = ({ item: { start = 0 } = {} }) => (+start > 0) + return store.dispatch('pfqueue/pollTaskStatus', { task_id, 
headers, expect }) }) }, stop: id => { return apiCall.postQuiet(['service', id, 'stop'], { async: true }, { headers }) .then(response => { const { data: { task_id } = {} } = response - return store.dispatch('pfqueue/pollTaskStatus', { task_id, headers }).then(response => { - const { stop } = response - if (parseInt(stop) > 0) { - return response - } - else { - throw new Error(i18n.t(`Could not stop {id} on {server}.`, { server, id })) - } - }) + const expect = ({ item: { stop = 0 } = {} }) => (+stop > 0) + return store.dispatch('pfqueue/pollTaskStatus', { task_id, headers, expect }) }) }, systemService: id => { diff --git a/html/pfappserver/root/src/store/modules/pfqueue.js b/html/pfappserver/root/src/store/modules/pfqueue.js index c63c9665de81..4063fa1bb440 100644 --- a/html/pfappserver/root/src/store/modules/pfqueue.js +++ b/html/pfappserver/root/src/store/modules/pfqueue.js @@ -14,11 +14,35 @@ const POLL_RETRY_NUM = 20 // delay between retries (seconds) const POLL_RETRY_INTERVAL = 3 -// grace period after initial command (seconds), avoid race-condition during pfperl-api restart -const POLL_GRACE_PERIOD = 10 +const retry = ({ task_id, headers, expect }) => { + return new Promise((resolve, reject) => { + setTimeout(() => { // debounce retries + pollTaskStatus({ task_id, headers, expect }) + .then(resolve) + .catch(err => { + if (err.message) { // AxiosError + const data = i18n.t('{message}. 
No response after {timeout} seconds, gave up after {retries} retries.', { message: err.message, timeout: POLL_RETRY_NUM * POLL_RETRY_INTERVAL, retries: POLL_RETRY_NUM }) + reject({ response: { data } }) + } + else { // recursion + reject(err) + } + }) + }, POLL_RETRY_INTERVAL * 1E3) + }) +} -const pollTaskStatus = ({ task_id, headers, grace_period = 0 }) => { +const pollTaskStatus = ({ task_id, headers, expect }) => { return apiCall.getQuiet(`pfqueue/task/${task_id}/status/poll`, { headers }).then(response => { + if (expect && !expect(response.data)) { // handle unexpected response + if (!(task_id in retries)) + retries[task_id] = 0 + else + retries[task_id]++ + if (retries[task_id] >= POLL_RETRY_NUM) // give up after N retries + throw new Error('Unexpected response') + return retry({ task_id, headers, expect }) + } if (task_id in retries) delete retries[task_id] return response.data @@ -36,21 +60,7 @@ const pollTaskStatus = ({ task_id, headers, grace_period = 0 }) => { retries[task_id]++ if (retries[task_id] >= POLL_RETRY_NUM) // give up after N retries throw error - return new Promise((resolve, reject) => { - setTimeout(() => { // debounce retries - pollTaskStatus({ task_id, headers }) - .then(resolve) - .catch(err => { - if (err.message) { // AxiosError - const data = i18n.t('{message}. 
No response after {timeout} seconds, gave up after {retries} retries.', { message: err.message, timeout: POLL_RETRY_NUM * POLL_RETRY_INTERVAL, retries: POLL_RETRY_NUM }) - reject({ response: { data } }) - } - else { // recursion - reject(error) - } - }) - }, (POLL_RETRY_INTERVAL + grace_period) * 1E3) - }) + return retry({ task_id, headers, expect }) } }) } @@ -92,10 +102,10 @@ const actions = { }) }) }, - pollTaskStatus: ({ dispatch }, { task_id, headers }) => { - return api.pollTaskStatus({ task_id, headers }).then(data => { // 'poll' returns immediately, or timeout after 15s + pollTaskStatus: ({ dispatch }, { task_id, headers, expect }) => { + return api.pollTaskStatus({ task_id, headers, expect }).then(data => { // 'poll' returns immediately, or timeout after 15s if ('status' in data && data.status.toString() === '202') { // 202: in progress - return dispatch('pollTaskStatus', { task_id, headers, grace_period: POLL_GRACE_PERIOD }) // recurse + return dispatch('pollTaskStatus', { task_id, headers, expect }) // recurse } if ('error' in data) { throw new Error(data.error.message) From 8e951398d76762c525df40b0875af72784566909 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 13 Nov 2024 14:56:55 -0500 Subject: [PATCH 105/176] NEWS.asciidoc entry for PR #8383 --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index b560df77ed26..5541a3f150db 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -44,6 +44,7 @@ For a list of compatibility related changes see the < Date: Wed, 13 Nov 2024 20:22:53 -0800 Subject: [PATCH 106/176] Change field bitmask from 0x000000FF to 0x0000000F The bit field is only 4 bits wide, this only needs the 0x0000000F mask. 
--- go/cron/flush_radius_audit_log_job.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/go/cron/flush_radius_audit_log_job.go b/go/cron/flush_radius_audit_log_job.go index 1b9b24587175..2c98af92d6f6 100644 --- a/go/cron/flush_radius_audit_log_job.go +++ b/go/cron/flush_radius_audit_log_job.go @@ -482,12 +482,12 @@ func(a AKMSuite) String() string { } func mapAKMSuite(akmSuiteInt int) string { - akmSuiteSelector: = akmSuiteInt & 0x000000FF + akmSuiteSelector: = akmSuiteInt & 0x0000000F return AKMSuite(akmSuiteSelector).String() } func mapCipherSuite(cipherSuiteInt int) string { - cipherSuiteSelector: = cipherSuiteInt & 0x000000FF + cipherSuiteSelector: = cipherSuiteInt & 0x0000000F return CipherSuite(cipherSuiteSelector).String() } @@ -506,4 +506,4 @@ func formatDate(dateStr string) string { } return t.Format("2006-01-02 15:04:05") -} \ No newline at end of file +} From 3ccf1931e8788800c529dba9d938a7656e1a1afb Mon Sep 17 00:00:00 2001 From: E-ThanG <19691760+E-ThanG@users.noreply.github.com> Date: Wed, 13 Nov 2024 20:39:56 -0800 Subject: [PATCH 107/176] Add AM/PM and UTC timezone to time stamp Add AM/PM and UTC timezone to time stamp --- go/cron/flush_radius_audit_log_job.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/cron/flush_radius_audit_log_job.go b/go/cron/flush_radius_audit_log_job.go index 2c98af92d6f6..fdd8c9ce804d 100644 --- a/go/cron/flush_radius_audit_log_job.go +++ b/go/cron/flush_radius_audit_log_job.go @@ -505,5 +505,5 @@ func formatDate(dateStr string) string { } } - return t.Format("2006-01-02 15:04:05") + return t.Format("2006-01-02 03:04:05 PM UTC") } From 525b87e641e095f8585798c4edda2405fe7577ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Fri, 15 Nov 2024 13:56:47 +0000 Subject: [PATCH 108/176] Updated the externalportal.pm --- lib/pf/web/externalportal.pm | 1 + 1 file changed, 1 insertion(+) diff --git 
a/lib/pf/web/externalportal.pm b/lib/pf/web/externalportal.pm index 583c8bfd53a9..c388490365e7 100644 --- a/lib/pf/web/externalportal.pm +++ b/lib/pf/web/externalportal.pm @@ -45,6 +45,7 @@ Readonly our $SWITCH_REWRITE_MAP => { 'guest' => 'Ubiquiti::Unifi', 'AeroHIVE' => 'AeroHIVE::AP', 'Cisco::Catalyst_2960' => 'Cisco::Cisco_IOS_15_0', + 'Cisco::WLC' => 'Cisco::Cisco_WLC_AireOS', 'Aruba::CX' => 'Aruba::ArubaOS_CX_10.x', 'Aruba::2930M' => 'Aruba::ArubaOS_Switch_16.x', 'Meraki::MS220_8' => 'Meraki::MS_v15', From 4d42ae964f7b86586f3889675d860d1d0398385a Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Tue, 19 Nov 2024 08:40:24 -0500 Subject: [PATCH 109/176] Fixes #8393 --- lib/pf/Switch.pm | 15 +++++++++++++++ lib/pf/Switch/MockedSwitch.pm | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/lib/pf/Switch.pm b/lib/pf/Switch.pm index 723cee48f421..47ff9770b1d4 100644 --- a/lib/pf/Switch.pm +++ b/lib/pf/Switch.pm @@ -4463,6 +4463,21 @@ sub implicit_acl { return $FALSE; } +=item populateAccessPointMACIP + +Fetch all the AP on the controller and cache it + +=cut + + +sub populateAccessPointMACIP { + my ($self) = @_; + my $logger = $self->logger; + $logger->warn("populateAccessPointMACIP not implemented for this switch module"); + + return $FALSE; +} + =back =head1 AUTHOR diff --git a/lib/pf/Switch/MockedSwitch.pm b/lib/pf/Switch/MockedSwitch.pm index 0e1d13449d33..9f0ab8205326 100644 --- a/lib/pf/Switch/MockedSwitch.pm +++ b/lib/pf/Switch/MockedSwitch.pm @@ -3467,6 +3467,21 @@ sub implicit_acl { return $FALSE; } +=item populateAccessPointMACIP + +Fetch all the AP on the controller and cache it + +=cut + + +sub populateAccessPointMACIP { + my ($self) = @_; + my $logger = $self->logger; + $logger->warn("populateAccessPointMACIP not implemented for this switch module"); + + return $false; +} + =back =head1 AUTHOR From 3a05d12ba2a5463e7efc3bb4ec87afbe3aff0807 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Tue, 19 Nov 2024 09:22:27 -0500 Subject: [PATCH 
110/176] fix/perl-api-restart (#8391) * Add var/pfperl-api * WIP * save the task-id for pf-perl stop * set the status restart status --- lib/pf/UnifiedApi.pm | 40 +++++++++++++++++++++++- lib/pf/UnifiedApi/Controller/Services.pm | 12 ++++--- lib/pf/file_paths.pm | 3 ++ sbin/pfperl-api-docker-wrapper | 1 + var/pfperl-api/.gitignore | 4 +++ 5 files changed, 54 insertions(+), 6 deletions(-) create mode 100644 var/pfperl-api/.gitignore diff --git a/lib/pf/UnifiedApi.pm b/lib/pf/UnifiedApi.pm index 37cd17e6a8ee..83c9e323e223 100644 --- a/lib/pf/UnifiedApi.pm +++ b/lib/pf/UnifiedApi.pm @@ -23,9 +23,13 @@ use JSON::MaybeXS qw(); } } +use pf::services; +use pf::pfqueue::status_updater::redis; +use pf::util::pfqueue qw(consumer_redis_client); + use Mojo::Base 'Mojolicious'; use pf::util qw(add_jitter); -use pf::file_paths qw($log_conf_dir); +use pf::file_paths qw($log_conf_dir $pfperl_api_restart_task); use pf::SwitchFactory; pf::SwitchFactory->preloadAllModules(); use MojoX::Log::Log4perl; @@ -33,6 +37,7 @@ use pf::UnifiedApi::Controller; use pf::UnifiedApi::Controller::Config::Switches; use pf::I18N::pfappserver; use pfconfig::refresh_last_touch_cache; +use File::Slurp; our $MAX_REQUEST_HANDLED = 2000; our $REQUEST_HANDLED_JITTER = 500; @@ -107,6 +112,39 @@ sub before_server_start { } ); } + + if (-e $pfperl_api_restart_task) { + my $task_id = read_file($pfperl_api_restart_task, {err_mode => 'quiet'}); + unlink($pfperl_api_restart_task); + if (defined $task_id) { + chomp($task_id); + if ($task_id ne '') { + set_service_status($task_id, 'pfperl-api'); + } + } + } +} + +sub set_service_status { + my ($task_id, $service_id) = @_; + my $service = get_service($service_id); + if (!$service) { + return; + } + + my $updater = pf::pfqueue::status_updater::redis->new( connection => consumer_redis_client(), task_id => $task_id ); + my $pid = $service->pid(); + $updater->completed({restart => $pid ? 
1 : 0, pid => $pid}); +} + +sub get_service { + my ($service_id) = @_; + my $class = $pf::services::ALL_MANAGERS{$service_id}; + if(defined($class) && $class->can('new')){ + return $class; + } + + return undef; } =head2 before_render_cb diff --git a/lib/pf/UnifiedApi/Controller/Services.pm b/lib/pf/UnifiedApi/Controller/Services.pm index 92d2526c6358..139dcc20cd8c 100644 --- a/lib/pf/UnifiedApi/Controller/Services.pm +++ b/lib/pf/UnifiedApi/Controller/Services.pm @@ -20,6 +20,7 @@ use pf::error qw(is_error); use pf::pfqueue::status_updater::redis; use pf::util::pfqueue qw(consumer_redis_client); use POSIX qw(setsid); +use pf::file_paths qw($pfperl_api_restart_task); sub resource { my ($self) = @_; @@ -144,12 +145,13 @@ sub do_action { my $service_id = $self->param('service_id'); # Marking the restart of pfperl-api as complete since it will be complete when running in a container if ($action eq 'do_restart' && $service_id eq 'pfperl-api') { - $updater->completed({restart => 0, pid => 0}); - my $data = $self->$action(); - } else { - my $data = $self->$action(); - $updater->completed($data); + if (open(my $fh, ">", $pfperl_api_restart_task)) { + print $fh $task_id; + close($fh); + } } + my $data = $self->$action(); + $updater->completed($data); }, sub {}, ); diff --git a/lib/pf/file_paths.pm b/lib/pf/file_paths.pm index 6e822a6440a2..4e81f8d6bb85 100644 --- a/lib/pf/file_paths.pm +++ b/lib/pf/file_paths.pm @@ -134,6 +134,7 @@ our ( $provisioning_filters_config_default_file, $provisioning_filters_meta_config_file, $provisioning_filters_meta_config_default_file, + $pfperl_api_restart_task, ); BEGIN { @@ -242,6 +243,7 @@ BEGIN { $provisioning_filters_config_default_file $provisioning_filters_meta_config_file $provisioning_filters_meta_config_default_file + $pfperl_api_restart_task ); } @@ -274,6 +276,7 @@ $systemd_unit_dir = "/usr/lib/systemd/system"; $acme_challenge_dir = catdir($conf_dir,"ssl/acme-challenge"); $conf_uploads = catdir($conf_dir, "uploads"); $api_i18n_dir 
= catdir($conf_dir, "I18N/api"); +$pfperl_api_restart_task = catdir($var_dir, "pfperl-api/restart-task"); $pfcmd_binary = catfile( $bin_dir, "pfcmd" ); diff --git a/sbin/pfperl-api-docker-wrapper b/sbin/pfperl-api-docker-wrapper index 14583e88ec4d..d12d22649708 100755 --- a/sbin/pfperl-api-docker-wrapper +++ b/sbin/pfperl-api-docker-wrapper @@ -20,6 +20,7 @@ args="$args --network=host" args="$args -e HOST_OS" args="$args -e DOCKER_NETWORK_IS_HOST" args="$args -v/usr/local/pf/var/conf/:/usr/local/pf/var/conf/" +args="$args -v /usr/local/pf/var/pfperl-api:/usr/local/pf/var/pfperl-api" if ! [ -z "$HTML_MOUNT" ]; then args="$args -v$HTML_MOUNT:/usr/local/pf/html" diff --git a/var/pfperl-api/.gitignore b/var/pfperl-api/.gitignore new file mode 100644 index 000000000000..5e7d2734cfc6 --- /dev/null +++ b/var/pfperl-api/.gitignore @@ -0,0 +1,4 @@ +# Ignore everything in this directory +* +# Except this file +!.gitignore From 66b2fbab1faa9d187bab80211c4ae23369df36fe Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Tue, 19 Nov 2024 14:26:22 +0000 Subject: [PATCH 111/176] update NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 5541a3f150db..03c45ec057c8 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -45,6 +45,7 @@ For a list of compatibility related changes see the < Date: Tue, 19 Nov 2024 10:37:13 -0500 Subject: [PATCH 112/176] Raise dhcp rate limiting to 5 minutes --- conf/pf.conf.defaults | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/pf.conf.defaults b/conf/pf.conf.defaults index ac7975cbbd11..931b1f7e446d 100644 --- a/conf/pf.conf.defaults +++ b/conf/pf.conf.defaults @@ -41,7 +41,7 @@ dhcpdetector=enabled # For example, a DHCPREQUEST for the same MAC/IP will only be processed once in the timeframe configured below. # This is independant of the DHCP server/relay handling the packet and is only based on the IP, MAC Address and DHCP type inside the packet. 
# A value of 0 will disable the rate limitation. -dhcp_rate_limiting=5s +dhcp_rate_limiting=300s # # network.rogue_dhcp_detection # From 5cd3dd91512c41e902174d0e6daeba68682baa72 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Tue, 19 Nov 2024 18:14:37 +0000 Subject: [PATCH 113/176] sort switches by description in Nodes sidebar, fixes #8368 --- .../root/src/views/Nodes/index.vue | 46 ++++++++++--------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/html/pfappserver/root/src/views/Nodes/index.vue b/html/pfappserver/root/src/views/Nodes/index.vue index 23bc5267aa42..2893db623ad4 100644 --- a/html/pfappserver/root/src/views/Nodes/index.vue +++ b/html/pfappserver/root/src/views/Nodes/index.vue @@ -63,28 +63,30 @@ const setup = (props, context) => { return { name: switchGroup.id || i18n.t('Default'), collapsable: true, - items: switchGroup.members.map(switchGroupMember => { - let conditionAdvanced - if ((/^([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2})$/.exec(switchGroupMember.id))) { // CIDR - const [start, end] = network.cidrToRange(switchGroupMember.id) - conditionAdvanced = { op: 'and', values: [ - { op: 'or', values: [{ field: 'locationlog.switch_ip', op: 'greater_than_equals', value: start }] }, - { op: 'or', values: [{ field: 'locationlog.switch_ip', op: 'less_than_equals', value: end }] } - ] } - } - else if ((/^([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})$/.exec(switchGroupMember.id))) { // IPv4 - conditionAdvanced = { op: 'and', values: [{ op: 'or', values: [{ field: 'locationlog.switch_ip', op: 'equals', value: switchGroupMember.id }] }] } - } - else { // non-CIDR - conditionAdvanced = { op: 'and', values: [{ op: 'or', values: [{ field: 'locationlog.switch', op: 'equals', value: switchGroupMember.id }] }] } - } - return { - name: switchGroupMember.id, - caption: switchGroupMember.description, - path: { name: 'nodeSearch', query: { conditionAdvanced: JSON.stringify(conditionAdvanced) } } - } - }) - } + items: 
switchGroup.members + .sort((a, b) => (a.description||a.id).localeCompare(b.description||b.id)) + .map(switchGroupMember => { + let conditionAdvanced + if ((/^([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2})$/.exec(switchGroupMember.id))) { // CIDR + const [start, end] = network.cidrToRange(switchGroupMember.id) + conditionAdvanced = { op: 'and', values: [ + { op: 'or', values: [{ field: 'locationlog.switch_ip', op: 'greater_than_equals', value: start }] }, + { op: 'or', values: [{ field: 'locationlog.switch_ip', op: 'less_than_equals', value: end }] } + ] } + } + else if ((/^([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})$/.exec(switchGroupMember.id))) { // IPv4 + conditionAdvanced = { op: 'and', values: [{ op: 'or', values: [{ field: 'locationlog.switch_ip', op: 'equals', value: switchGroupMember.id }] }] } + } + else { // non-CIDR + conditionAdvanced = { op: 'and', values: [{ op: 'or', values: [{ field: 'locationlog.switch', op: 'equals', value: switchGroupMember.id }] }] } + } + return { + name: switchGroupMember.id, + caption: switchGroupMember.description, + path: { name: 'nodeSearch', query: { conditionAdvanced: JSON.stringify(conditionAdvanced) } } + } + }) + } }) } ])) From f84bb63651d3e5f426eabcf166ba683f937da300 Mon Sep 17 00:00:00 2001 From: Durand Date: Fri, 22 Nov 2024 10:41:02 -0500 Subject: [PATCH 114/176] pfacct improvement (#8369) * Added a queue for httpd.aaa call * Improve performance the pfacct accounting handling. * Remove the HTTP Workers * Handle overflows better * Default to the number of workers to twice the number of CPUs. 
* Change the call to the api to notify * Trace the radius request backlog and overflow in statsd * Removed useless fmt.Println --------- Co-authored-by: James Rouzier --- conf/pf.conf.defaults | 3 ++- go/cmd/pfacct/pfacct.go | 20 ++++++++++++++++++-- go/cmd/pfacct/radius.go | 28 ++++++++++++++++++++++------ 3 files changed, 42 insertions(+), 9 deletions(-) diff --git a/conf/pf.conf.defaults b/conf/pf.conf.defaults index 931b1f7e446d..6b6975fd3ba5 100644 --- a/conf/pf.conf.defaults +++ b/conf/pf.conf.defaults @@ -1412,7 +1412,8 @@ process_bandwidth_accounting = disabled # radius_configuration.pfacct_workers # # The number of workers proccessing accounting packets. -pfacct_workers = 5 +# If zero it will be twice the numbers of CPUs. +pfacct_workers = 0 # radius_configuration.pfacct_work_queue_size # # The size of the queue for each worker. diff --git a/go/cmd/pfacct/pfacct.go b/go/cmd/pfacct/pfacct.go index 891dc3f93501..280ad35f52fd 100644 --- a/go/cmd/pfacct/pfacct.go +++ b/go/cmd/pfacct/pfacct.go @@ -1,11 +1,14 @@ package main import ( + "cmp" "context" "database/sql" "fmt" "net" + "runtime" "strconv" + "sync/atomic" "time" cache "github.com/fdurand/go-cache" @@ -23,7 +26,6 @@ import ( ) const DefaultTimeDuration = 5 * time.Minute -const DefaultRadiusWorkers = 5 const DefaultRadiusWorkQueueSize = 1000 type radiusRequest struct { @@ -52,6 +54,7 @@ type PfAcct struct { StatsdOption statsd.Option StatsdClient *statsd.Client radiusRequests []chan<- radiusRequest + overflows []atomic.Int64 localSecret string StatsdOnce tryableonce.TryableOnce isProxied bool @@ -82,7 +85,6 @@ func NewPfAcct() *PfAcct { pfAcct := &PfAcct{ Db: Database, TimeDuration: DefaultTimeDuration, - RadiusWorkers: DefaultRadiusWorkers, RadiusWorkQueueSize: DefaultRadiusWorkQueueSize, } pfAcct.SwitchInfoCache = cache.New(5*time.Minute, 10*time.Minute) @@ -93,8 +95,10 @@ func NewPfAcct() *PfAcct { pfAcct.SetupConfig(ctx) pfAcct.radiusRequests = makeRadiusRequests(pfAcct, pfAcct.RadiusWorkers, 
pfAcct.RadiusWorkQueueSize) + pfAcct.overflows = make([]atomic.Int64, pfAcct.RadiusWorkers, pfAcct.RadiusWorkers) pfAcct.AAAClient = jsonrpc2.NewAAAClientFromConfig(ctx) //pfAcct.Dispatcher = NewDispatcher(16, 128) + pfAcct.runPing() return pfAcct } @@ -115,6 +119,7 @@ func makeRadiusRequests(h *PfAcct, requestFanOut, backlog int) []chan<- radiusRe } func (pfAcct *PfAcct) SetupConfig(ctx context.Context) { + numOfCpus := runtime.NumCPU() var keyConfNet pfconfigdriver.PfconfigKeys keyConfNet.PfconfigNS = "config::Network" pfconfigdriver.FetchDecodeSocket(ctx, &keyConfNet) @@ -174,6 +179,9 @@ func (pfAcct *PfAcct) SetupConfig(ctx context.Context) { pfAcct.RadiusWorkers = int(i) } + // If set to zero use twice the number of CPUs for the workers + pfAcct.RadiusWorkers = cmp.Or(pfAcct.RadiusWorkers, 2*numOfCpus) + if i, err := strconv.ParseInt(RadiusConfiguration.PfacctWorkQueueSize, 10, 64); err != nil { logWarn(ctx, fmt.Sprintf("Invalid number '%s' pfacct_work_queue_size defaulting to '%d'", RadiusConfiguration.PfacctWorkQueueSize, pfAcct.RadiusWorkQueueSize)) } else { @@ -230,6 +238,14 @@ func (pfAcct *PfAcct) runPing() { }(pfAcct) } +func (pfAcct *PfAcct) SendGauge(name string, val int) { + if pfAcct.StatsdClient == nil { + return + } + + pfAcct.StatsdClient.Gauge(name, val) +} + func isProxied(pfAcct *PfAcct) bool { return pfconfigdriver.GetClusterSummary(context.Background()).ClusterEnabled == 1 || pfAcct.radiusdAcctEnabled } diff --git a/go/cmd/pfacct/radius.go b/go/cmd/pfacct/radius.go index c8d69ae2c3ee..75aff5db5d79 100644 --- a/go/cmd/pfacct/radius.go +++ b/go/cmd/pfacct/radius.go @@ -75,7 +75,8 @@ func (h *PfAcct) HandleStatusServer(w radius.ResponseWriter, r *radius.Request) func (h *PfAcct) HandleAccounting(w radius.ResponseWriter, r *radius.Request) { ctx := r.Context() - defer h.NewTiming().Send("pfacct.HandleAccountingRequest") + timing := h.NewTiming() + defer timing.Send("pfacct.HandleAccountingRequest") status := rfc2866.AcctStatusType_Get(r.Packet) 
if status > rfc2866.AcctStatusType_Value_InterimUpdate { outPacket := r.Response(radius.CodeAccountingResponse) @@ -119,12 +120,23 @@ func (h *PfAcct) HandleAccounting(w radius.ResponseWriter, r *radius.Request) { func (h *PfAcct) sendRadiusRequestToQueue(rr radiusRequest) { queueIndex := djb2Hash(rr.mac[:]) % uint64(len(h.radiusRequests)) - h.radiusRequests[queueIndex] <- rr + select { + case h.radiusRequests[queueIndex] <- rr: + default: + go func() { + h.overflows[queueIndex].Add(1) + h.radiusRequests[queueIndex] <- rr + h.overflows[queueIndex].Add(-1) + }() + } + + h.SendGauge(fmt.Sprintf("pfacct.radiusRequests[%d]", queueIndex), len(h.radiusRequests[queueIndex])+int(h.overflows[queueIndex].Load())) } func (h *PfAcct) handleAccountingRequest(rr radiusRequest) { r, switchInfo, mac, status := rr.r, rr.switchInfo, rr.mac, rr.status - defer h.NewTiming().Send("pfacct.accounting." + rr.status.String()) + timing := h.NewTiming() + defer timing.Send("pfacct.accounting." + rr.status.String()) ctx := r.Context() in_bytes := int64(rfc2866.AcctInputOctets_Get(r.Packet)) out_bytes := int64(rfc2866.AcctOutputOctets_Get(r.Packet)) @@ -176,9 +188,9 @@ func (h *PfAcct) handleAccountingRequest(rr radiusRequest) { } } - h.sendRadiusAccounting(r, switchInfo) h.handleTimeBalance(r, switchInfo, unique_session_id) h.handleBandwidthBalance(r, switchInfo, in_bytes+out_bytes) + h.sendRadiusAccounting(rr, switchInfo) } func (h *PfAcct) handleTimeBalance(r *radius.Request, switchInfo *SwitchInfo, unique_session uint64) { @@ -304,7 +316,11 @@ func (h *PfAcct) accountingUniqueSessionId(r *radius.Request) uint64 { return hash.Sum64() } -func (h *PfAcct) sendRadiusAccounting(r *radius.Request, switchInfo *SwitchInfo) { +func (h *PfAcct) sendRadiusAccounting(rr radiusRequest, switchInfo *SwitchInfo) { + h.sendRadiusAccountingCall(rr.r) +} + +func (h *PfAcct) sendRadiusAccountingCall(r *radius.Request) { ctx := r.Context() attr := packetToMap(ctx, r.Packet) attr["PF_HEADERS"] = 
map[string]string{ @@ -317,7 +333,7 @@ func (h *PfAcct) sendRadiusAccounting(r *radius.Request, switchInfo *SwitchInfo) logWarn(ctx, fmt.Sprintf("Empty NAS-IP-Address, using the source IP address of the packet (%s)", attr["NAS-IP-Address"])) } - if _, err := h.AAAClient.Call(ctx, "radius_accounting", attr); err != nil { + if err := h.AAAClient.Notify(ctx, "radius_accounting", attr); err != nil { logError(ctx, err.Error()) } } From 79d2903bfd52a4b6d24e31dd25bbbbd6ce27d752 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Fri, 22 Nov 2024 10:45:55 -0500 Subject: [PATCH 115/176] Stringify the id returned (#7395) --- lib/pf/UnifiedApi/Controller/Crud.pm | 9 +++++++++ lib/pf/UnifiedApi/Controller/SecurityEvents.pm | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/lib/pf/UnifiedApi/Controller/Crud.pm b/lib/pf/UnifiedApi/Controller/Crud.pm index 4c72e11f1dcf..42e38e6a9971 100644 --- a/lib/pf/UnifiedApi/Controller/Crud.pm +++ b/lib/pf/UnifiedApi/Controller/Crud.pm @@ -263,12 +263,21 @@ sub create_error_msg { "Unable to create resource" } +=head2 pre_render_create + +pre_render_create + +=cut + +sub pre_render_create { } + sub render_create { my ($self, $status, $obj) = @_; if (is_error($status)) { return $self->render_error($status, $obj->{message}, $obj->{errors}); } + $self->pre_render_create($obj); my $id = $obj->{$self->primary_key}; my $location_id = $id; $location_id =~ s#/#~#g; diff --git a/lib/pf/UnifiedApi/Controller/SecurityEvents.pm b/lib/pf/UnifiedApi/Controller/SecurityEvents.pm index ab5e425321f5..30f89cb99737 100644 --- a/lib/pf/UnifiedApi/Controller/SecurityEvents.pm +++ b/lib/pf/UnifiedApi/Controller/SecurityEvents.pm @@ -114,6 +114,17 @@ sub per_device_class_pending { return $self->_per_device_class_status('pending'); } +=head2 pre_render_create + +pre_render_create + +=cut + +sub pre_render_create { + my ($self, $data) = @_; + $data->{$self->primary_key} .= ""; +} + =head1 AUTHOR Inverse inc. 
From 14fc52af78b5d367eda543583caffbda5c67c8c4 Mon Sep 17 00:00:00 2001 From: Durand Date: Fri, 22 Nov 2024 10:47:11 -0500 Subject: [PATCH 116/176] If deauthOnPrevious is enable then bypass the web_form_release (#7319) Co-authored-by: Satkunas <3904468+satkunas@users.noreply.github.com> --- conf/switches.conf.defaults | 3 ++- .../captiveportal/PacketFence/DynamicRouting/Module/Root.pm | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/conf/switches.conf.defaults b/conf/switches.conf.defaults index 8a051cc32dab..0ba5dbcde038 100644 --- a/conf/switches.conf.defaults +++ b/conf/switches.conf.defaults @@ -94,9 +94,10 @@ useCoA=Y radiusDeauthUseConnector=Y VoIPLLDPDetect=Y PostMfaValidation=N +deauthOnPrevious=N # ACLs UsePushACLs=N UseDownloadableACLs=N ACLsLimit=20 -DownloadableACLsLimit=384 +DownloadableACLsLimit=384 \ No newline at end of file diff --git a/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Root.pm b/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Root.pm index 2fdb3fa7c61e..32f552047617 100644 --- a/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Root.pm +++ b/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Root.pm @@ -116,6 +116,9 @@ sub handle_web_form_release { $switch = pf::SwitchFactory->instantiate($last_switch_id); } } + if (isenabled($switch->{_deauthOnPrevious})) { + return $FALSE; + } my $session = new pf::Portal::Session(client_mac => $self->current_mac)->session; if(defined($switch) && $switch && $switch->supportsWebFormRegistration && defined($session->param('is_external_portal')) && $session->param('is_external_portal')){ get_logger->info("(" . $switch->{_id} . ") supports web form release. 
Will use this method to authenticate"); From 41ac29f5a0f8374972ef8a88c370547f3becdf8f Mon Sep 17 00:00:00 2001 From: Scott Date: Sat, 23 Nov 2024 02:49:08 +1100 Subject: [PATCH 117/176] don't resolve IPs for LDAP source using SSL and requiring verification (#6808) --- lib/pf/Authentication/Source/LDAPSource.pm | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/pf/Authentication/Source/LDAPSource.pm b/lib/pf/Authentication/Source/LDAPSource.pm index 39fe2b473e4a..1af39aa86aee 100644 --- a/lib/pf/Authentication/Source/LDAPSource.pm +++ b/lib/pf/Authentication/Source/LDAPSource.pm @@ -244,8 +244,14 @@ sub _connect { my $connection; my $logger = Log::Log4perl::get_logger(__PACKAGE__); my $LDAPServer; - # Lookup the server hostnames to IPs so they can be shuffled better and to improve the failure detection - my @LDAPServers = map { valid_ip($_) ? $_ : @{resolve($_) // []} } @{$self->{'host'} // []}; + my @LDAPServers; + if ($self->{'encryption'} eq SSL && $self->{'verify'} eq 'require') { + # Not expanding hostnames in order to allow LDAPS to send SNI header and verify hostname header against certifcate (at the cost of IP based round robin) + @LDAPServers = @{$self->{'host'} // []}; + } else { + # Lookup the server hostnames to IPs so they can be shuffled better and to improve the failure detection + @LDAPServers = map { valid_ip($_) ? 
$_ : @{resolve($_) // []} } @{$self->{'host'} // []}; + } if ($self->shuffle) { @LDAPServers = List::Util::shuffle @LDAPServers; } From 4e634bb964ef249d0006eebf41f20a2308e47297 Mon Sep 17 00:00:00 2001 From: Nicolas Quiniou-Briand Date: Fri, 22 Nov 2024 16:57:40 +0100 Subject: [PATCH 118/176] Added Aruba-MPSK-password attribute (#6957) * Added Aruba-MPSK-password attribute * fix typo * Call SUPER::returnRadiusAccessAccept --------- Co-authored-by: Durand Fabrice --- lib/pf/Switch/Aruba.pm | 73 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/lib/pf/Switch/Aruba.pm b/lib/pf/Switch/Aruba.pm index a19baab63be2..83390368b9db 100644 --- a/lib/pf/Switch/Aruba.pm +++ b/lib/pf/Switch/Aruba.pm @@ -657,6 +657,79 @@ sub returnAuthorizeRead { return [$status, %$radius_reply_ref]; } +=item returnRadiusAccessAccept + +Prepares the RADIUS Access-Accept response for the network device. + +Default implementation. + +=cut + +sub returnRadiusAccessAccept { + my ($self, $args) = @_; + my $logger = $self->logger(); + + $args->{'unfiltered'} = $TRUE; + $self->compute_action(\$args); + my @super_reply = @{$self->SUPER::returnRadiusAccessAccept($args)}; + my $status = shift @super_reply; + my %radius_reply = @super_reply; + my $radius_reply_ref = \%radius_reply; + return [$status, %$radius_reply_ref] if($status == $RADIUS::RLM_MODULE_USERLOCK); + + # Inline Vs. VLAN enforcement + my $role = ""; + if ( (!$args->{'wasInline'} || ($args->{'wasInline'} && $args->{'vlan'} != 0) ) && isenabled($self->{_VlanMap})) { + if(defined($args->{'vlan'}) && $args->{'vlan'} ne "" && $args->{'vlan'} ne 0){ + $logger->info("(".$self->{'_id'}.") Added VLAN $args->{'vlan'} to the returned RADIUS Access-Accept"); + $radius_reply_ref = { + 'Tunnel-Medium-Type' => $RADIUS::ETHERNET, + 'Tunnel-Type' => $RADIUS::VLAN, + 'Tunnel-Private-Group-ID' => $args->{'vlan'} . "", + }; + } + else { + $logger->debug("(".$self->{'_id'}.") Received undefined VLAN. 
No VLAN added to RADIUS Access-Accept"); + } + } + + if ( isenabled($self->{_RoleMap}) && $self->supportsRoleBasedEnforcement()) { + $logger->debug("Network device (".$self->{'_id'}.") supports roles. Evaluating role to be returned"); + if ( defined($args->{'user_role'}) && $args->{'user_role'} ne "" ) { + $role = $self->getRoleByName($args->{'user_role'}); + } + if ( defined($role) && $role ne "" ) { + $radius_reply_ref = { + %$radius_reply_ref, + $self->returnRoleAttributes($role), + }; + $logger->info( + "(".$self->{'_id'}.") Added role $role to the returned RADIUS Access-Accept" + ); + } + else { + $logger->debug("(".$self->{'_id'}.") Received undefined role. No Role added to RADIUS Access-Accept"); + } + } + + if ($args->{profile}->dpskEnabled()) { + if (defined($args->{owner}->{psk})) { + $radius_reply_ref->{'Aruba-MPSK-Passphrase'} = $args->{owner}->{psk}; + } else { + $radius_reply_ref->{'Aruba-MPSK-Passphrase'} = $args->{profile}->{_default_psk_key}; + } + } + + my $status = $RADIUS::RLM_MODULE_OK; + if (!isenabled($args->{'unfiltered'})) { + my $filter = pf::access_filter::radius->new; + my $rule = $filter->test('returnRadiusAccessAccept', $args); + ($radius_reply_ref, $status) = $filter->handleAnswerInRule($rule,$args,$radius_reply_ref); + } + + return [$status, %$radius_reply_ref]; +} + =item =cut From c3b5e6607684c2fa20a30ed6d816585bb4de7370 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Fri, 22 Nov 2024 16:49:37 +0000 Subject: [PATCH 119/176] update NEWS --- NEWS.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 03c45ec057c8..07bc69ea0ea5 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -38,6 +38,8 @@ For a list of compatibility related changes see the < Date: Fri, 22 Nov 2024 17:26:45 +0000 Subject: [PATCH 120/176] Removed the WiSM modules and Updated the to-14.1-convert-ciscoWLC-switch-types.pl script --- .../to-14.1-convert-ciscoWLC-switch-types.pl | 2 + lib/pf/Switch/Cisco/WiSM.pm | 55 
------------------- lib/pf/Switch/Cisco/WiSM2.pm | 55 ------------------- 3 files changed, 2 insertions(+), 110 deletions(-) delete mode 100644 lib/pf/Switch/Cisco/WiSM.pm delete mode 100644 lib/pf/Switch/Cisco/WiSM2.pm diff --git a/addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl b/addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl index 8af06bec4f81..02f572f70a58 100755 --- a/addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl +++ b/addons/upgrade/to-14.1-convert-ciscoWLC-switch-types.pl @@ -36,6 +36,8 @@ =head1 DESCRIPTION 'Cisco::WLC_2500' => 'Cisco::Cisco_WLC_AireOS', 'Cisco::WLC_4400' => 'Cisco::Cisco_WLC_AireOS', 'Cisco::WLC_5500' => 'Cisco::Cisco_WLC_AireOS', + 'Cisco::WiSM' => 'Cisco::Cisco_WLC_AireOS', + 'Cisco::WiSM2' => 'Cisco::Cisco_WLC_AireOS', ); my $cs = pf::IniFiles->new(-file => $file, -allowempty => 1); diff --git a/lib/pf/Switch/Cisco/WiSM.pm b/lib/pf/Switch/Cisco/WiSM.pm deleted file mode 100644 index 0ed8fb66329b..000000000000 --- a/lib/pf/Switch/Cisco/WiSM.pm +++ /dev/null @@ -1,55 +0,0 @@ -package pf::Switch::Cisco::WiSM; - -=head1 NAME - -pf::Switch::Cisco::WiSM - Object oriented module to parse SNMP traps and manage Cisco Wireless Services Module (WiSM) - -=head1 STATUS - -This module is currently only a placeholder, see L for relevant support items. - -It should work on all 6500 WiSM modules and maybe 7500. - -=cut - -use strict; -use warnings; - -use Net::SNMP; - -use base ('pf::Switch::Cisco::WLC'); - -sub description { 'Cisco WiSM' } - -=head1 AUTHOR - -Inverse inc. - -=head1 COPYRIGHT - -Copyright (C) 2005-2024 Inverse inc. - -=head1 LICENSE - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. 
- -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -USA. - -=cut - -1; - -# vim: set shiftwidth=4: -# vim: set expandtab: -# vim: set backspace=indent,eol,start: diff --git a/lib/pf/Switch/Cisco/WiSM2.pm b/lib/pf/Switch/Cisco/WiSM2.pm deleted file mode 100644 index 1781d8700dd5..000000000000 --- a/lib/pf/Switch/Cisco/WiSM2.pm +++ /dev/null @@ -1,55 +0,0 @@ -package pf::Switch::Cisco::WiSM2; - -=head1 NAME - -pf::Switch::Cisco::WiSM2 - Object oriented module to parse SNMP traps and manage Cisco Wireless Services Module (WiSM2) - -=head1 STATUS - -This module is currently only a placeholder, see L for relevant support items. - -It should work on all 6500 WiSM2 modules and maybe 7500. - -=cut - -use strict; -use warnings; - -use Net::SNMP; - -use base ('pf::Switch::Cisco::WLC'); - -sub description { 'Cisco WiSM2' } - -=head1 AUTHOR - -Inverse inc. - -=head1 COPYRIGHT - -Copyright (C) 2005-2024 Inverse inc. - -=head1 LICENSE - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -USA. - -=cut - -1; - -# vim: set shiftwidth=4: -# vim: set expandtab: -# vim: set backspace=indent,eol,start: From c95324a61c86f875f864be39ee6c1325de1fb6fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cabprasa=E2=80=9D?= <123509565+abprasa@users.noreply.github.com> Date: Mon, 25 Nov 2024 08:53:22 +0000 Subject: [PATCH 121/176] Updated the cisco_ap.asciidoc --- docs/network/networkdevice/cisco_ap.asciidoc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/network/networkdevice/cisco_ap.asciidoc b/docs/network/networkdevice/cisco_ap.asciidoc index ea26f7f615d7..1d29c87d5093 100644 --- a/docs/network/networkdevice/cisco_ap.asciidoc +++ b/docs/network/networkdevice/cisco_ap.asciidoc @@ -141,9 +141,9 @@ aaa server radius dynamic-author To be contributed... -==== Wireless LAN Controller (WLC) or Wireless Services Module (WiSM) +==== Wireless LAN Controller (AireOS) -In this section, we cover the basic configuration of the WiSM for PacketFence using the web interface. +In this section, we cover the basic configuration of the WLC for PacketFence using the web interface. * First, globally define the FreeRADIUS server running on PacketFence (PacketFence's IP) and make sure _Support for RFC 3576_ (also called _Support for CoA_) is enabled. When the option is missing from your WLC, it is enabled by default. @@ -197,9 +197,9 @@ WARNING: When creating interfaces, it's important to configure DHCP servers. Oth You are good to go! -===== Wireless LAN Controller (WLC) Web Auth +===== Wireless LAN Controller (AireOS) Web Auth -In this section, we cover the basic configuration of the WLC Web Auth for PacketFence using the web interface. 
+In this section, we cover the basic configuration of the WLC AireOS Web Auth for PacketFence using the web interface. The idea is to forward the device to the captive portal with an ACL if the device is in an unreg state and allow the device to reach Internet (or the normal network) by changing the ACL once registered. In the unreg state, the WLC will intercept the HTTP traffic and forward the device to the captive portal. @@ -236,7 +236,7 @@ one to allow anything (Authorize_any) . image::ACL.png[scaledwidth="100%",alt="ACL"] -* Then the last step is to configure the WLC in PacketFence. +* Then the last step is to configure the WLC AireOS in PacketFence. Role by Web Auth URL image::wlc_packetfence.png[scaledwidth="100%",alt="ACL"] @@ -245,10 +245,10 @@ Role definition image::wlc_packetfence2.png[scaledwidth="100%",alt="ACL"] -===== Wireless LAN Controller (WLC) IPSK +===== Wireless LAN Controller (AireOS) IPSK -In this section, we cover the basic configuration of the WLC IPSK feature. -Starting from WLC 8.5 release, Cisco introduces the IPSK feature. +In this section, we cover the basic configuration of the WLC AireOS IPSK feature. +Starting from WLC AireOS 8.5 release, Cisco introduces the IPSK feature. Identity PSKs are unique pre-shared keys created for individuals or groups of users on the same SSID. In this section we will cover the WLC configuration and the PacketFence configuration. 
@@ -282,7 +282,7 @@ PacketFence Configuration: image::dpsk_provisioner.png[scaledwidth="100%",alt="Provisioner IPKS"] -==== Wireless LAN Controller (WLC) 9800 +==== Wireless LAN Controller (IOS XE) 9800 ===== General RADIUS Configuration From 2e809e981bf17208b82a1696468443ef593e5fb9 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Mon, 25 Nov 2024 05:46:42 -0500 Subject: [PATCH 122/176] Use new Radius audit log method for eduroam --- conf/radiusd/eduroam.example | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/conf/radiusd/eduroam.example b/conf/radiusd/eduroam.example index 7571cc217d4f..961960d3a64a 100644 --- a/conf/radiusd/eduroam.example +++ b/conf/radiusd/eduroam.example @@ -345,10 +345,10 @@ post-auth { if (updated || ok || noop) { request-timing - -sql + packetfence-audit-log-accept } else { request-timing - -sql_reject + packetfence-audit-log-reject } } attr_filter.packetfence_post_auth @@ -365,7 +365,7 @@ post-auth { Post-Auth-Type REJECT { request-timing # log failed authentications in SQL, too. 
- -sql_reject + packetfence-audit-log-reject attr_filter.access_reject attr_filter.packetfence_post_auth From 89626e79041a6c5ba952fa74e0f7f7f4fc9d4ec6 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Mon, 25 Nov 2024 08:24:41 -0500 Subject: [PATCH 123/176] Simple quote all secret in freeradius --- lib/pf/services/manager/radiusd_child.pm | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/pf/services/manager/radiusd_child.pm b/lib/pf/services/manager/radiusd_child.pm index bcad8c268614..3ae4f800f791 100644 --- a/lib/pf/services/manager/radiusd_child.pm +++ b/lib/pf/services/manager/radiusd_child.pm @@ -216,7 +216,7 @@ home_server pf.remote { ipaddr = $radius_remote src_ipaddr = $tags{'management_ip'} port = 1812 - secret = $local_secret + secret = '$local_secret' response_window = 6 status_check = status-server revive_interval = 120 @@ -626,7 +626,7 @@ EOT $tags{'config'} .= <<"EOT"; client eduroam_tlrs_server_$i { ipaddr = $radius_ip - secret = $radius_secret + secret = '$radius_secret' shortname = eduroam_tlrs$i virtual_server = $virtual_server } @@ -898,7 +898,7 @@ EOT oauth2 { discovery = "https://login.microsoftonline.com/%{Realm}/v2.0" client_id = "$client_id" - client_secret = "$client_secret" + client_secret = '$client_secret' cache_password = yes } EOT @@ -1043,7 +1043,7 @@ EOT home_server $radius { ipaddr = $source->{'host'} port = $source->{'port'} -secret = $source->{'secret'} +secret = '$source->{'secret'}' $source->{'options'} } @@ -1166,7 +1166,7 @@ home_server pf$i.cluster { ipaddr = $radius_back src_ipaddr = $cluster_ip port = $self->{auth_port} - secret = $local_secret + secret = '$local_secret' response_window = 6 status_check = status-server check_interval = 20 @@ -1179,7 +1179,7 @@ home_server pf$i.cli.cluster { ipaddr = $radius_back src_ipaddr = $cluster_ip port = $self->{cli_port} - secret = $local_secret + secret = '$local_secret' response_window = 60 status_check = status-server check_interval = 20 @@ 
-1216,7 +1216,7 @@ home_server eduroam$i.cluster { ipaddr = $radius_back src_ipaddr = $cluster_ip port = $self->{eduroam_port} - secret = $local_secret + secret = '$local_secret' response_window = 6 status_check = status-server revive_interval = 120 @@ -1364,7 +1364,7 @@ EOT $tags{'config'} .= <<"EOT"; client $radius_back { ipaddr = $radius_back - secret = $local_secret + secret = '$local_secret' port = $self->{eduroam_port} shortname = pf } @@ -1381,7 +1381,7 @@ EOT $tags{'config'} .= <<"EOT"; client $management_ip { ipaddr = $management_ip - secret = $local_secret + secret = '$local_secret' shortname = pf } EOT From 6b87c3a6e51d5234599388fb5c67726c2c44bdaf Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Mon, 25 Nov 2024 08:47:06 -0500 Subject: [PATCH 124/176] Set Bool as int --- lib/pf/Authentication/Source/LDAPSource.pm | 6 +++--- lib/pf/Authentication/Source/RADIUSSource.pm | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/pf/Authentication/Source/LDAPSource.pm b/lib/pf/Authentication/Source/LDAPSource.pm index 1af39aa86aee..f211c1882da3 100644 --- a/lib/pf/Authentication/Source/LDAPSource.pm +++ b/lib/pf/Authentication/Source/LDAPSource.pm @@ -71,10 +71,10 @@ has 'usernameattribute' => (isa => 'Str', is => 'rw', required => 1); has 'searchattributes' => (isa => 'ArrayRef[Str]', is => 'rw', required => 0); has 'append_to_searchattributes' => (isa => 'Maybe[Str]', is => 'rw', required => 0); has '_cached_connection' => (is => 'rw'); -has 'cache_match' => ( isa => 'Bool', is => 'rw', default => '0' ); +has 'cache_match' => ( isa => 'Bool', is => 'rw', default => 0 ); has 'email_attribute' => (isa => 'Maybe[Str]', is => 'rw', default => 'mail'); -has 'monitor' => ( isa => 'Bool', is => 'rw', default => '1' ); -has 'shuffle' => ( isa => 'Bool', is => 'rw', default => '0' ); +has 'monitor' => ( isa => 'Bool', is => 'rw', default => 1 ); +has 'shuffle' => ( isa => 'Bool', is => 'rw', default => 0 ); has 'dead_duration' => ( isa => 'Num', is 
=> 'rw', default => $DEFAULT_LDAP_DEAD_DURATION); has 'client_cert_file' => ( isa => 'Maybe[Str]', is => 'rw', default => ""); has 'client_key_file' => ( isa => 'Maybe[Str]', is => 'rw', default => ""); diff --git a/lib/pf/Authentication/Source/RADIUSSource.pm b/lib/pf/Authentication/Source/RADIUSSource.pm index ad014dde2658..37262a5f332b 100644 --- a/lib/pf/Authentication/Source/RADIUSSource.pm +++ b/lib/pf/Authentication/Source/RADIUSSource.pm @@ -32,7 +32,7 @@ has 'host' => (isa => 'Maybe[Str]', is => 'rw', default => '127.0.0.1'); has 'port' => (isa => 'Maybe[Int]', is => 'rw', default => 1812); has 'timeout' => (isa => 'Maybe[Int]', is => 'rw', default => 1); has 'secret' => (isa => 'Str', is => 'rw', required => 1); -has 'monitor' => ( isa => 'Bool', is => 'rw', default => '1' ); +has 'monitor' => ( isa => 'Bool', is => 'rw', default => 1 ); has 'options' => (isa => 'Str', is => 'rw', required => 1); has 'use_connector' => (isa => 'Bool', is => 'rw', default => 1); has 'nas_ip_address' => (isa => 'Maybe[Str]', is => 'rw', default => ''); From cabd8a0bd6ca0dc781438e6ce7a73ab30f7f6ef3 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Mon, 25 Nov 2024 10:11:43 -0500 Subject: [PATCH 125/176] Reduce time to flush the radius log (#8397) --- conf/pfcron.conf.defaults | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conf/pfcron.conf.defaults b/conf/pfcron.conf.defaults index f8a1fcd0e486..71a1ef40e9d6 100644 --- a/conf/pfcron.conf.defaults +++ b/conf/pfcron.conf.defaults @@ -1042,7 +1042,7 @@ description=Radius Audit Log from redis to the database # schedule # # The schedule of task -schedule=@every 1m +schedule=@every 10s # # batch # @@ -1052,7 +1052,7 @@ batch=100 # timeout # # How long a flush_radius_audit_log job can run -timeout=10s +timeout=8s # # local # From 316b9a79d8659a09fcb0218b6a970fe2c53b1d7b Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Mon, 25 Nov 2024 22:04:14 +0000 Subject: [PATCH 126/176] fix typo --- 
lib/pf/Switch/MockedSwitch.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pf/Switch/MockedSwitch.pm b/lib/pf/Switch/MockedSwitch.pm index 9f0ab8205326..cfcd17abc73f 100644 --- a/lib/pf/Switch/MockedSwitch.pm +++ b/lib/pf/Switch/MockedSwitch.pm @@ -3479,7 +3479,7 @@ sub populateAccessPointMACIP { my $logger = $self->logger; $logger->warn("populateAccessPointMACIP not implemented for this switch module"); - return $false; + return $FALSE; } =back From 42bbb2e2f0043b68d50b87340a4beff47a6b273e Mon Sep 17 00:00:00 2001 From: bmp96 <119009345+bmp96@users.noreply.github.com> Date: Tue, 26 Nov 2024 17:21:52 +0100 Subject: [PATCH 127/176] =?UTF-8?q?Rewrote=20part=20of=20H3C's=20NasPortTo?= =?UTF-8?q?IfIndex=20subroutine=20to=20fix=20behaviour=20fo=E2=80=A6=20(#8?= =?UTF-8?q?062)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Reworte part of H3C's NasPortToIfIndex subroutine to fix behaviour for stacked Comware v7 switches * Fixed function dependency --- lib/pf/Switch/H3C/Comware_v7.pm | 47 +++++++++++++++++++++++++++++++++ lib/pf/Switch/constants.pm | 1 + 2 files changed, 48 insertions(+) diff --git a/lib/pf/Switch/H3C/Comware_v7.pm b/lib/pf/Switch/H3C/Comware_v7.pm index 8798e2bf835e..3f1c28a7755c 100644 --- a/lib/pf/Switch/H3C/Comware_v7.pm +++ b/lib/pf/Switch/H3C/Comware_v7.pm @@ -13,11 +13,58 @@ This module is currently only a placeholder, see L. use strict; use warnings; +use POSIX; + use base ('pf::Switch::H3C::Comware_v5'); sub description { 'Comware v7' } +=head1 SUBROUTINES + +=over + +=item NasPortToIfIndex + +Translate RADIUS NAS-Port into switch's ifIndex. 
+ +=cut + +sub NasPortToIfIndex { + my ($self, $nas_port) = @_; + my $logger = $self->logger; + + # 4096 NAS-Port slots are reserved per physical ports, + # I'm assuming that each client will get a +1 so I translate all of them into the same ifIndex + # Also there's a large offset (16781312), 4096 * (4096 + 1) + # VLAN ID are last 3 nibbles ────────────────┐ + # Port is next 2 nibbles ────────────┐ │ + # Subslot is next 1 nibble ──────────┐ │ │ + # Slot is next 2 nibbles ───────┐ │ │ │ + # Example: 33575422 --to hex--> (02)(0)(05)(1FE) + my $nas_port_no_vlan = floor($nas_port / $THREECOM::NAS_PORTS_PER_PORT_RANGE); + my $slot = floor($nas_port_no_vlan / $THREECOM::NAS_PORTS_PER_PORT_RANGE); + my $port = $nas_port_no_vlan - $THREECOM::NAS_PORTS_PER_PORT_RANGE * $slot; + my $ifIndex = $port + $THREECOM::IFINDEX_OFFSET_PER_SLOT * ($slot - 1); + if ($ifIndex > 0) { + + # TODO we should think about caching or pre-computation here + $ifIndex = $self->getIfIndexForThisDot1dBasePort($ifIndex); + + # return if defined and an int + return $ifIndex if (defined($ifIndex) && $ifIndex =~ /^\d+$/); + } + + # error reporting + $logger->warn( + "Unknown NAS-Port format. ifIndex translation could have failed. " + . "VLAN re-assignment and switch/port accounting will be affected." + ); + return $nas_port; +} + +=back + =head1 AUTHOR Inverse inc. 
diff --git a/lib/pf/Switch/constants.pm b/lib/pf/Switch/constants.pm index 713f340c5fbf..ebf53152124d 100644 --- a/lib/pf/Switch/constants.pm +++ b/lib/pf/Switch/constants.pm @@ -454,6 +454,7 @@ Used for NAS-Port to ifIndex translation Readonly::Scalar our $NAS_PORT_OFFSET => 16781312; Readonly::Scalar our $NAS_PORTS_PER_PORT_RANGE => 4096; +Readonly::Scalar our $IFINDEX_OFFSET_PER_SLOT => 65; =head1 BROCADE From a055cc0d78a831fad7e27a44b070ed12ce2c3443 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Wed, 27 Nov 2024 18:52:37 +0000 Subject: [PATCH 128/176] update NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 07bc69ea0ea5..111040544605 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -40,6 +40,7 @@ For a list of compatibility related changes see the < Date: Wed, 27 Nov 2024 18:54:20 +0000 Subject: [PATCH 129/176] update NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 111040544605..7252855b7367 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -51,6 +51,7 @@ For a list of compatibility related changes see the < Date: Wed, 27 Nov 2024 18:57:54 +0000 Subject: [PATCH 130/176] update NEWS --- NEWS.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 7252855b7367..18c485a64046 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -29,6 +29,7 @@ For a list of compatibility related changes see the < Date: Wed, 27 Nov 2024 19:10:54 +0000 Subject: [PATCH 131/176] update NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 18c485a64046..df0aa8aeb066 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -37,6 +37,7 @@ For a list of compatibility related changes see the < Date: Wed, 27 Nov 2024 14:12:55 -0500 Subject: [PATCH 132/176] fix: html/pfappserver/root/package.json & html/pfappserver/root/package-lock.json to reduce vulnerabilities 
(#8404) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-JS-AXIOS-6671926 Co-authored-by: snyk-bot --- html/pfappserver/root/package-lock.json | 9 +++++---- html/pfappserver/root/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index d4e0add0b0b6..592e28acc894 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -10,7 +10,7 @@ "dependencies": { "@vue/composition-api": "^1.7.2", "autoprefixer": "10.4.5", - "axios": "1.7.4", + "axios": "^1.7.8", "bootstrap": "4.6.1", "bootstrap-vue": "2.23.1", "core-js": "3.37.1", @@ -6508,9 +6508,10 @@ } }, "node_modules/axios": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.4.tgz", - "integrity": "sha512-DukmaFRnY6AzAALSH4J2M3k6PkaC+MfaAGdEERRWcC9q3/TWQwLpHR8ZRLKTdQ3aBDL64EdluRDjJqKw+BPZEw==", + "version": "1.7.8", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.8.tgz", + "integrity": "sha512-Uu0wb7KNqK2t5K+YQyVCLM76prD5sRFjKHbJYCP1J7JFGEQ6nN7HWn9+04LAeiJ3ji54lgS/gZCH1oxyrf1SPw==", + "license": "MIT", "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.0", diff --git a/html/pfappserver/root/package.json b/html/pfappserver/root/package.json index 93bef19a433b..d37711d396c4 100644 --- a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -12,7 +12,7 @@ "dependencies": { "@vue/composition-api": "^1.7.2", "autoprefixer": "10.4.5", - "axios": "1.7.4", + "axios": "1.7.8", "bootstrap": "4.6.1", "bootstrap-vue": "2.23.1", "core-js": "3.37.1", From 414057d6108d9f337a95e9ebbe47255047b9254b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:14:04 -0500 Subject: [PATCH 133/176] Bump elliptic from 6.5.7 to 6.6.0 in /html/pfappserver/root (#8371) Bumps 
[elliptic](https://github.com/indutny/elliptic) from 6.5.7 to 6.6.0. - [Commits](https://github.com/indutny/elliptic/compare/v6.5.7...v6.6.0) --- updated-dependencies: - dependency-name: elliptic dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- html/pfappserver/root/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index 592e28acc894..144ca5d45824 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -9429,9 +9429,9 @@ "integrity": "sha512-kpLJJi3zxTR1U828P+LIUDZ5ohixyo68/IcYOHLqnbTPr/wdgn4i1ECvmALN9E16JPA6cvCG5UG79gVwVdEK5w==" }, "node_modules/elliptic": { - "version": "6.5.7", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.7.tgz", - "integrity": "sha512-ESVCtTwiA+XhY3wyh24QqRGBoP3rEdDUl3EDUUo9tft074fi19IrdpH7hLCMMP3CIj7jb3W96rn8lt/BqIlt5Q==", + "version": "6.6.0", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.6.0.tgz", + "integrity": "sha512-dpwoQcLc/2WLQvJvLRHKZ+f9FgOdjnq11rurqwekGQygGPsYSK29OMMD2WalatiqQ+XGFDglTNixpPfI+lpaAA==", "dev": true, "dependencies": { "bn.js": "^4.11.9", From a0686bfabe5e1992a720bb258dc3a73a145a8003 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:15:11 -0500 Subject: [PATCH 134/176] Bump cookie and express in /html/pfappserver/root (#8353) Bumps [cookie](https://github.com/jshttp/cookie) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. 
Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.21.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.21.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- html/pfappserver/root/package-lock.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index 144ca5d45824..470dbe377edc 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -8151,9 +8151,9 @@ } }, "node_modules/cookie": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", "dev": true, "engines": { "node": ">= 0.6" @@ -11046,9 +11046,9 @@ } }, "node_modules/express": { - "version": "4.21.0", - "resolved": "https://registry.npmjs.org/express/-/express-4.21.0.tgz", - "integrity": "sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==", + "version": "4.21.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz", + "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==", "dev": 
true, "dependencies": { "accepts": "~1.3.8", @@ -11056,7 +11056,7 @@ "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.6.0", + "cookie": "0.7.1", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", From 10304fb4f9a7563248983ecad6efc1e290420ea3 Mon Sep 17 00:00:00 2001 From: Satkunas <3904468+satkunas@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:16:03 -0500 Subject: [PATCH 135/176] fix: t/mock_servers/node_radius/package.json & t/mock_servers/node_radius/package-lock.json to reduce vulnerabilities (#8341) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-JS-COOKIE-8163060 Co-authored-by: snyk-bot --- t/mock_servers/node_radius/package-lock.json | 108 ++++++++----------- t/mock_servers/node_radius/package.json | 2 +- 2 files changed, 46 insertions(+), 64 deletions(-) diff --git a/t/mock_servers/node_radius/package-lock.json b/t/mock_servers/node_radius/package-lock.json index 585766d2fce8..5c74b3584c4f 100644 --- a/t/mock_servers/node_radius/package-lock.json +++ b/t/mock_servers/node_radius/package-lock.json @@ -10,7 +10,7 @@ "license": "GPL-2.0", "dependencies": { "dgram": "^1.0.1", - "express": "^4.20.0", + "express": "^4.21.1", "node-persist": "^3.1.0", "radius": "^1.1.4" } @@ -55,20 +55,6 @@ "npm": "1.2.8000 || >= 1.4.16" } }, - "node_modules/body-parser/node_modules/qs": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", - "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", - "dependencies": { - "side-channel": "^1.0.6" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", @@ -115,9 +101,10 @@ } }, "node_modules/cookie": { - "version": "0.6.0", - "resolved": 
"https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -220,23 +207,24 @@ } }, "node_modules/express": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/express/-/express-4.20.0.tgz", - "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==", + "version": "4.21.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz", + "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==", + "license": "MIT", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.6.0", + "cookie": "0.7.1", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.2.0", + "finalhandler": "1.3.1", "fresh": "0.5.2", "http-errors": "2.0.0", "merge-descriptors": "1.0.3", @@ -245,11 +233,11 @@ "parseurl": "~1.3.3", "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", - "qs": "6.11.0", + "qs": "6.13.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", "send": "0.19.0", - "serve-static": "1.16.0", + "serve-static": "1.16.2", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -269,12 +257,13 @@ } }, "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "version": "1.3.1", + 
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", "dependencies": { "debug": "2.6.9", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", @@ -285,6 +274,15 @@ "node": ">= 0.8" } }, + "node_modules/finalhandler/node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -522,6 +520,7 @@ "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -544,11 +543,12 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -640,45 +640,27 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/serve-static": { - "version": "1.16.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz", - 
"integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" } }, - "node_modules/serve-static/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/serve-static/node_modules/send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", - "dependencies": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "2.4.1", - "range-parser": "~1.2.1", - "statuses": "2.0.1" - }, + "node_modules/serve-static/node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", "engines": { - "node": ">= 0.8.0" + "node": ">= 0.8" } }, "node_modules/set-function-length": { diff --git a/t/mock_servers/node_radius/package.json b/t/mock_servers/node_radius/package.json index 900bdccdb7ed..ca8d57bd1918 100644 --- a/t/mock_servers/node_radius/package.json +++ b/t/mock_servers/node_radius/package.json @@ -10,7 +10,7 @@ 
"license": "GPL-2.0", "dependencies": { "dgram": "^1.0.1", - "express": "^4.20.0", + "express": "^4.21.1", "node-persist": "^3.1.0", "radius": "^1.1.4" } From 7eb145ad6dfd386b65f5358266a8426361ddf2e9 Mon Sep 17 00:00:00 2001 From: Extra Fu Date: Wed, 27 Nov 2024 14:16:40 -0500 Subject: [PATCH 136/176] fix: upgrade plotly.js-dist-min from 2.32.0 to 2.35.2 (#8336) Snyk has created this PR to upgrade plotly.js-dist-min from 2.32.0 to 2.35.2. See this package in npm: plotly.js-dist-min See this project in Snyk: https://app.snyk.io/org/akamai-esg-pilot-org/project/08730ed5-aba9-49b2-868f-15e5e652ebcc?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot --- html/pfappserver/root/package-lock.json | 9 +++++---- html/pfappserver/root/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index 470dbe377edc..be8c444f7094 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -22,7 +22,7 @@ "mixpanel-browser": "^2.50.0", "papaparse": "5.4.1", "pinia": "^2.1.7", - "plotly.js-dist-min": "^2.32.0", + "plotly.js-dist-min": "^2.35.2", "plotly.js-locales": "^2.32.0", "promised-debounce": "^0.4.2", "typeface-b612-mono": "1.1.13", @@ -19175,9 +19175,10 @@ } }, "node_modules/plotly.js-dist-min": { - "version": "2.32.0", - "resolved": "https://registry.npmjs.org/plotly.js-dist-min/-/plotly.js-dist-min-2.32.0.tgz", - "integrity": "sha512-UVznwUQVc7NeFih0tnIbvCpxct+Jxt6yxOGTYJF4vkKIUyujvyiTrH+XazglvcXdybFLERMu/IKt6Lhz3+BqMQ==" + "version": "2.35.2", + "resolved": "https://registry.npmjs.org/plotly.js-dist-min/-/plotly.js-dist-min-2.35.2.tgz", + "integrity": "sha512-oWDTf2kYOmTtEw3epeeSBdfH/H3OSktF0suST9oI6fIgKfbyd4MT7TPh8+CVzdHYllYon24Q0HI1hZjOnLqk6g==", + "license": "MIT" }, "node_modules/plotly.js-locales": { "version": "2.32.0", diff --git a/html/pfappserver/root/package.json 
b/html/pfappserver/root/package.json index d37711d396c4..25ca2b471830 100644 --- a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -25,7 +25,7 @@ "papaparse": "5.4.1", "pinia": "^2.1.7", "plotly.js-locales": "^2.32.0", - "plotly.js-dist-min": "^2.32.0", + "plotly.js-dist-min": "^2.35.2", "promised-debounce": "^0.4.2", "typeface-b612-mono": "1.1.13", "uuid": "^8.3.0", From e6ca57c5ec183de507a5b568a918a6eb90070d9e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:17:29 -0500 Subject: [PATCH 137/176] Bump rollup from 2.79.1 to 2.79.2 in /html/pfappserver/root (#8329) Bumps [rollup](https://github.com/rollup/rollup) from 2.79.1 to 2.79.2. - [Release notes](https://github.com/rollup/rollup/releases) - [Changelog](https://github.com/rollup/rollup/blob/master/CHANGELOG.md) - [Commits](https://github.com/rollup/rollup/compare/v2.79.1...v2.79.2) --- updated-dependencies: - dependency-name: rollup dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- html/pfappserver/root/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index be8c444f7094..9b7f8717b965 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -22343,9 +22343,9 @@ } }, "node_modules/rollup": { - "version": "2.79.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.1.tgz", - "integrity": "sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==", + "version": "2.79.2", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.2.tgz", + "integrity": "sha512-fS6iqSPZDs3dr/y7Od6y5nha8dW1YnbgtsyotCVvoFGKbERG++CVRFv1meyGDE1SNItQA8BrnCw7ScdAhRJ3XQ==", "dev": true, "peer": true, "bin": { From 955f7b24c50e140840df444360a0d43e33c81086 Mon Sep 17 00:00:00 2001 From: Extra Fu Date: Wed, 27 Nov 2024 14:18:02 -0500 Subject: [PATCH 138/176] fix: upgrade mixpanel-browser from 2.50.0 to 2.55.1 (#8328) Snyk has created this PR to upgrade mixpanel-browser from 2.50.0 to 2.55.1. 
See this package in npm: mixpanel-browser See this project in Snyk: https://app.snyk.io/org/akamai-esg-pilot-org/project/08730ed5-aba9-49b2-868f-15e5e652ebcc?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot --- html/pfappserver/root/package-lock.json | 92 +++++++++++++++---------- html/pfappserver/root/package.json | 2 +- 2 files changed, 55 insertions(+), 39 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index 9b7f8717b965..ae6b28c95c5b 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -19,7 +19,7 @@ "lodash": "^4.17.21", "messageformat": "^2.3.0", "mime-types": "^2.1.35", - "mixpanel-browser": "^2.50.0", + "mixpanel-browser": "^2.55.1", "papaparse": "5.4.1", "pinia": "^2.1.7", "plotly.js-dist-min": "^2.35.2", @@ -3776,11 +3776,12 @@ "peer": true }, "node_modules/@rrweb/types": { - "version": "2.0.0-alpha.13", - "resolved": "https://registry.npmjs.org/@rrweb/types/-/types-2.0.0-alpha.13.tgz", - "integrity": "sha512-ytq+MeVm/vP2ybw+gTAN3Xvt7HN2yS+wlbfnwHpQMftxrwzq0kEZHdw+Jp5WUvvpONWzXriNAUU9dW0qLGkzNg==", + "version": "2.0.0-alpha.17", + "resolved": "https://registry.npmjs.org/@rrweb/types/-/types-2.0.0-alpha.17.tgz", + "integrity": "sha512-AfDTVUuCyCaIG0lTSqYtrZqJX39ZEYzs4fYKnexhQ+id+kbZIpIJtaut5cto6dWZbB3SEe4fW0o90Po3LvTmfg==", + "license": "MIT", "dependencies": { - "rrweb-snapshot": "^2.0.0-alpha.13" + "rrweb-snapshot": "^2.0.0-alpha.17" } }, "node_modules/@rushstack/eslint-patch": { @@ -4479,7 +4480,8 @@ "node_modules/@types/css-font-loading-module": { "version": "0.0.7", "resolved": "https://registry.npmjs.org/@types/css-font-loading-module/-/css-font-loading-module-0.0.7.tgz", - "integrity": "sha512-nl09VhutdjINdWyXxHWN/w9zlNCfr60JUqJbd24YXUuCwgeL0TpFSdElCwb6cxfB6ybE19Gjj4g0jsgkXxKv1Q==" + "integrity": "sha512-nl09VhutdjINdWyXxHWN/w9zlNCfr60JUqJbd24YXUuCwgeL0TpFSdElCwb6cxfB6ybE19Gjj4g0jsgkXxKv1Q==", + "license": "MIT" 
}, "node_modules/@types/eslint": { "version": "8.4.5", @@ -5821,7 +5823,8 @@ "node_modules/@xstate/fsm": { "version": "1.6.5", "resolved": "https://registry.npmjs.org/@xstate/fsm/-/fsm-1.6.5.tgz", - "integrity": "sha512-b5o1I6aLNeYlU/3CPlj/Z91ybk1gUsKT+5NAJI+2W4UjvS5KLG28K9v5UvNoFVjHV8PajVZ00RH3vnjyQO7ZAw==" + "integrity": "sha512-b5o1I6aLNeYlU/3CPlj/Z91ybk1gUsKT+5NAJI+2W4UjvS5KLG28K9v5UvNoFVjHV8PajVZ00RH3vnjyQO7ZAw==", + "license": "MIT" }, "node_modules/@xtuc/ieee754": { "version": "1.2.0", @@ -6881,6 +6884,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz", "integrity": "sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==", + "license": "MIT", "engines": { "node": ">= 0.6.0" } @@ -11262,7 +11266,8 @@ "node_modules/fflate": { "version": "0.4.8", "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.4.8.tgz", - "integrity": "sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==" + "integrity": "sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==", + "license": "MIT" }, "node_modules/figures": { "version": "2.0.0", @@ -17558,14 +17563,16 @@ "node_modules/mitt": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.1.tgz", - "integrity": "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==" + "integrity": "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==", + "license": "MIT" }, "node_modules/mixpanel-browser": { - "version": "2.50.0", - "resolved": "https://registry.npmjs.org/mixpanel-browser/-/mixpanel-browser-2.50.0.tgz", - "integrity": "sha512-iP4sbSRMemjWbnH+KQZRxZ360bcXtFpoQuUiWjjdw9AsURn0MrR9/2RnPOJ8J8tt1dMm7kTKwOjGV8pkbWbmAA==", + "version": "2.55.1", + "resolved": "https://registry.npmjs.org/mixpanel-browser/-/mixpanel-browser-2.55.1.tgz", + 
"integrity": "sha512-NSEPdFSJxoR1OCKWKHbtqd3BeH1c9NjXbEt0tN5TgBEO1nSDji6niU9n4MopAXOP0POET9spjpQKxZtLZKTJwA==", + "license": "Apache-2.0", "dependencies": { - "rrweb": "2.0.0-alpha.4" + "rrweb": "2.0.0-alpha.13" } }, "node_modules/mkdirp": { @@ -17644,15 +17651,16 @@ "integrity": "sha512-wynEP02LmIbLpcYw8uBKpcfF6dmg2vcpKqxeH5UcoKEYdExslsdUA4ugFauuaeYdTB76ez6gJW8XAZ6CgkXYxA==" }, "node_modules/nanoid": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", - "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -19239,9 +19247,9 @@ } }, "node_modules/postcss": { - "version": "8.4.31", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", - "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "version": "8.4.47", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz", + "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==", "funding": [ { "type": "opencollective", @@ -19256,10 +19264,11 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "nanoid": "^3.3.6", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" + "nanoid": "^3.3.7", + "picocolors": "^1.1.0", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" @@ -22424,32 +22433,38 @@ } }, "node_modules/rrdom": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/rrdom/-/rrdom-0.1.7.tgz", - "integrity": 
"sha512-ZLd8f14z9pUy2Hk9y636cNv5Y2BMnNEY99wxzW9tD2BLDfe1xFxtLjB4q/xCBYo6HRe0wofzKzjm4JojmpBfFw==", + "version": "2.0.0-alpha.17", + "resolved": "https://registry.npmjs.org/rrdom/-/rrdom-2.0.0-alpha.17.tgz", + "integrity": "sha512-b6caDiNcFO96Opp7TGdcVd4OLGSXu5dJe+A0IDiAu8mk7OmhqZCSDlgQdTKmdO5wMf4zPsUTgb8H/aNvR3kDHA==", + "license": "MIT", "dependencies": { - "rrweb-snapshot": "^2.0.0-alpha.4" + "rrweb-snapshot": "^2.0.0-alpha.17" } }, "node_modules/rrweb": { - "version": "2.0.0-alpha.4", - "resolved": "https://registry.npmjs.org/rrweb/-/rrweb-2.0.0-alpha.4.tgz", - "integrity": "sha512-wEHUILbxDPcNwkM3m4qgPgXAiBJyqCbbOHyVoNEVBJzHszWEFYyTbrZqUdeb1EfmTRC2PsumCIkVcomJ/xcOzA==", + "version": "2.0.0-alpha.13", + "resolved": "https://registry.npmjs.org/rrweb/-/rrweb-2.0.0-alpha.13.tgz", + "integrity": "sha512-a8GXOCnzWHNaVZPa7hsrLZtNZ3CGjiL+YrkpLo0TfmxGLhjNZbWY2r7pE06p+FcjFNlgUVTmFrSJbK3kO7yxvw==", + "license": "MIT", "dependencies": { - "@rrweb/types": "^2.0.0-alpha.4", + "@rrweb/types": "^2.0.0-alpha.13", "@types/css-font-loading-module": "0.0.7", "@xstate/fsm": "^1.4.0", "base64-arraybuffer": "^1.0.1", "fflate": "^0.4.4", "mitt": "^3.0.0", - "rrdom": "^0.1.7", - "rrweb-snapshot": "^2.0.0-alpha.4" + "rrdom": "^2.0.0-alpha.13", + "rrweb-snapshot": "^2.0.0-alpha.13" } }, "node_modules/rrweb-snapshot": { - "version": "2.0.0-alpha.13", - "resolved": "https://registry.npmjs.org/rrweb-snapshot/-/rrweb-snapshot-2.0.0-alpha.13.tgz", - "integrity": "sha512-slbhNBCYjxLGCeH95a67ECCy5a22nloXp1F5wF7DCzUNw80FN7tF9Lef1sRGLNo32g3mNqTc2sWLATlKejMxYw==" + "version": "2.0.0-alpha.17", + "resolved": "https://registry.npmjs.org/rrweb-snapshot/-/rrweb-snapshot-2.0.0-alpha.17.tgz", + "integrity": "sha512-GBg5pV8LHOTbeVmH2VHLEFR0mc2QpQMzAvcoxEGfPNWgWHc8UvKCyq7pqN1vA+fDZ+yXXbixeO0kB2pzVvFCBw==", + "license": "MIT", + "dependencies": { + "postcss": "^8.4.38" + } }, "node_modules/run-parallel": { "version": "1.2.0", @@ -23121,9 +23136,10 @@ } }, "node_modules/source-map-js": { - "version": 
"1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } diff --git a/html/pfappserver/root/package.json b/html/pfappserver/root/package.json index 25ca2b471830..038ba65b08fe 100644 --- a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -21,7 +21,7 @@ "lodash": "^4.17.21", "messageformat": "^2.3.0", "mime-types": "^2.1.35", - "mixpanel-browser": "^2.50.0", + "mixpanel-browser": "^2.55.1", "papaparse": "5.4.1", "pinia": "^2.1.7", "plotly.js-locales": "^2.32.0", From 103888659e2261c558b9662cbe759cdc7344e5e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:19:28 -0500 Subject: [PATCH 139/176] Bump cookie and express in /t/mock_servers/node_radius (#8405) Bumps [cookie](https://github.com/jshttp/cookie) to 0.7.1 and updates ancestor dependency [express](https://github.com/expressjs/express). These dependencies need to be updated together. Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.20.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.20.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> From 921fee816bb1f4c82f6dbef228805b2324f2e5df Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Wed, 27 Nov 2024 19:19:51 +0000 Subject: [PATCH 140/176] update NEWS --- NEWS.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index df0aa8aeb066..2abe4ddaa501 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -57,7 +57,7 @@ For a list of compatibility related changes see the < Date: Wed, 27 Nov 2024 19:51:53 +0000 Subject: [PATCH 141/176] move fleetdm to integration --- .../root/src/views/Configuration/fleetDM/schema.js | 12 ++++++------ .../root/src/views/Configuration/index.vue | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/html/pfappserver/root/src/views/Configuration/fleetDM/schema.js b/html/pfappserver/root/src/views/Configuration/fleetDM/schema.js index c5ed789e8800..b85ba39b0d16 100644 --- a/html/pfappserver/root/src/views/Configuration/fleetDM/schema.js +++ b/html/pfappserver/root/src/views/Configuration/fleetDM/schema.js @@ -27,20 +27,20 @@ export default (props) => { email: yup.string().nullable() .when('id', { is: () => token === "", - then: yup.string().required(i18n.t('Email is required when token not specified.')).email(), - otherwise: yup.string() + then: yup.string().nullable().required(i18n.t('Email is required when token not specified.')).email(), + otherwise: yup.string().nullable() }).label(i18n.t('Email for performing FleetDM API Call')), password: yup.string().nullable() .when('id', { is: () => token === "", - then: yup.string().required(i18n.t('Password is required when token not specified.')).min(6), - otherwise: yup.string() + then: yup.string().nullable().required(i18n.t('Password is required when token not specified.')).min(6), + otherwise: yup.string().nullable() }).label(i18n.t('Password for performing FleetDM API Call')), token: 
yup.string().nullable() .when('id', { is: () => email === "" || password === "", - then: yup.string().required(i18n.t('Token is required when email / password not specified.')).min(20), - otherwise: yup.string() + then: yup.string().nullable().required(i18n.t('Token is required when email / password not specified.')).min(20), + otherwise: yup.string().nullable() }).label(i18n.t('Permanent API token for performing FleetDM API Call')), }) } diff --git a/html/pfappserver/root/src/views/Configuration/index.vue b/html/pfappserver/root/src/views/Configuration/index.vue index 5c3ff6df337a..519f3d667cd3 100644 --- a/html/pfappserver/root/src/views/Configuration/index.vue +++ b/html/pfappserver/root/src/views/Configuration/index.vue @@ -76,6 +76,7 @@ const setup = () => { { name: i18n.t('Cloud Services'), path: '/configuration/clouds' }, { name: i18n.t('Event Loggers'), path: '/configuration/event_loggers' }, { name: i18n.t('Firewall SSO'), path: '/configuration/firewalls' }, + { name: i18n.t('FleetDM'), path: '/configuration/fleetdm', class: 'no-saas'}, { name: i18n.t('Web Services'), path: '/configuration/webservices' }, { name: i18n.t('Switch Templates'), path: '/configuration/switch_templates' }, { name: i18n.t('Event Handlers'), path: '/configuration/pfdetect' }, @@ -153,7 +154,6 @@ const setup = () => { ] }, { name: i18n.t('Cluster'), path: '/configuration/active_active', class: 'no-saas' }, - { name: i18n.t('FleetDM'), path: '/configuration/fleetdm', class: 'no-saas'}, { name: i18n.t('RADIUS'), items: [ { name: i18n.t('General'), path: '/configuration/radius/general' }, From 000af7e761b04ae717ccb45cf7b5fcedff3e84a1 Mon Sep 17 00:00:00 2001 From: E-ThanG Date: Wed, 27 Nov 2024 14:51:18 -0800 Subject: [PATCH 142/176] Correct spacing on short variable declaration operator mapAKMSuite and mapCipherSuite had ": =" instead of ":=" --- go/cron/flush_radius_audit_log_job.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/go/cron/flush_radius_audit_log_job.go b/go/cron/flush_radius_audit_log_job.go index fdd8c9ce804d..f4828cacf075 100644 --- a/go/cron/flush_radius_audit_log_job.go +++ b/go/cron/flush_radius_audit_log_job.go @@ -482,12 +482,12 @@ func(a AKMSuite) String() string { } func mapAKMSuite(akmSuiteInt int) string { - akmSuiteSelector: = akmSuiteInt & 0x0000000F + akmSuiteSelector := akmSuiteInt & 0x0000000F return AKMSuite(akmSuiteSelector).String() } func mapCipherSuite(cipherSuiteInt int) string { - cipherSuiteSelector: = cipherSuiteInt & 0x0000000F + cipherSuiteSelector := cipherSuiteInt & 0x0000000F return CipherSuite(cipherSuiteSelector).String() } From ea793a230c34017504e28d44a7529635d6371716 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Fri, 29 Nov 2024 14:18:39 -0500 Subject: [PATCH 143/176] Fixed nested group --- .../sources/_components/ldapCondition/useLdapAttributes.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useLdapAttributes.js b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useLdapAttributes.js index c43ce60e882f..23c3e0612aa8 100644 --- a/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useLdapAttributes.js +++ b/html/pfappserver/root/src/views/Configuration/sources/_components/ldapCondition/useLdapAttributes.js @@ -23,7 +23,7 @@ const useLdapAttributes = (props) => { watch(connectedToLdap, (newConnectionState) => { const { type } = form.value || {} - const extras = (type === 'AD') ? ['memberOf:1.2.840.113556.1.4.1941'] : [] + const extras = (type === 'AD') ? 
['memberOf:1.2.840.113556.1.4.1941:'] : [] if (newConnectionState === true) { ldapAttributesLoading.value = true ldapClient.getAttributes().then((attributes) => { From 7e698a406c5c258dc038a5fc8cc29d72a8986034 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Tue, 3 Dec 2024 16:43:32 -0500 Subject: [PATCH 144/176] Updated Meraki MR_v2 Switch module --- lib/pf/Switch/Meraki/MR_v2.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pf/Switch/Meraki/MR_v2.pm b/lib/pf/Switch/Meraki/MR_v2.pm index 39b402396d9d..8a2705a5ef5e 100644 --- a/lib/pf/Switch/Meraki/MR_v2.pm +++ b/lib/pf/Switch/Meraki/MR_v2.pm @@ -22,7 +22,7 @@ use pf::util; use pf::node; use pf::util::radius qw(perform_coa perform_disconnect); -use base ('pf::Switch::Cisco::WLC'); +use base ('pf::Switch::Cisco::Cisco_WLC_AireOS'); sub description { 'Meraki cloud controller V2' } From 7542363028c684be05f5f5303dbafd31563500dc Mon Sep 17 00:00:00 2001 From: Durand Date: Tue, 3 Dec 2024 17:12:00 -0500 Subject: [PATCH 145/176] Feature/improve dpsk (#8356) * Improve DPSK speed * Make PSK Unique * Fix syntax error * Optimize find_user_by_psk * optimize find_user_by_psk * Continue optimization of find_user_by_psk * Fixes sql syntax * Test if the psk is defined for the user tied to the device * Test if the psk is not empty --------- Co-authored-by: James Rouzier --- db/pf-schema-X.Y.sql | 3 +- db/upgrade-X.X-X.Y.sql | 3 + .../PacketFence/Controller/Status.pm | 3 + .../PacketFence/Controller/WirelessProfile.pm | 3 + .../DynamicRouting/Module/Provisioning.pm | 8 ++- lib/pf/Switch/OpenWiFi.pm | 38 +++++------ lib/pf/Switch/Ruckus/SmartZone.pm | 64 +++++++++++-------- lib/pf/provisioner/mobileconfig.pm | 26 +++++--- lib/pf/radius.pm | 2 +- 9 files changed, 93 insertions(+), 57 deletions(-) diff --git a/db/pf-schema-X.Y.sql b/db/pf-schema-X.Y.sql index 29f4f630b06b..83e8cb019c38 100644 --- a/db/pf-schema-X.Y.sql +++ b/db/pf-schema-X.Y.sql @@ -79,7 +79,8 @@ CREATE TABLE person ( `potd` enum('no','yes') 
NOT NULL DEFAULT 'no', `otp` MEDIUMTEXT NULL DEFAULT NULL, `sponsored_date` DATETIME DEFAULT NULL, - PRIMARY KEY (`pid`) + PRIMARY KEY (`pid`), + UNIQUE KEY person_psk (`psk`) ) ENGINE=InnoDB DEFAULT CHARACTER SET = 'utf8mb4' COLLATE = 'utf8mb4_general_ci'; -- diff --git a/db/upgrade-X.X-X.Y.sql b/db/upgrade-X.X-X.Y.sql index 0de7815f93f2..69582502bfaa 100644 --- a/db/upgrade-X.X-X.Y.sql +++ b/db/upgrade-X.X-X.Y.sql @@ -67,6 +67,9 @@ ALTER TABLE `pki_certs` \! echo "Adding default timestamp to RADIUS audit logs"; ALTER TABLE radius_audit_log MODIFY created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP; +\! echo "Make psk unique"; +ALTER TABLE person ADD CONSTRAINT UNIQUE person_psk (`psk`); + \! echo "Incrementing PacketFence schema version..."; INSERT IGNORE INTO pf_version (id, version, created_at) VALUES (@VERSION_INT, CONCAT_WS('.', @MAJOR_VERSION, @MINOR_VERSION), NOW()); diff --git a/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm b/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm index 3d0061715b62..e20d13d5c631 100644 --- a/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm +++ b/html/captive-portal/lib/captiveportal/PacketFence/Controller/Status.pm @@ -113,6 +113,9 @@ sub userIsAuthenticated : Private { foreach my $provisioner (@provisioners) { next unless $provisioner->isa("pf::provisioner::dpsk"); my $dpsk = $provisioner->generate_dpsk($pid); + if (!$dpsk) { + $dpsk = "Error generating the PSK, contact your administrator"; + } $c->stash( psk => $dpsk); } } diff --git a/html/captive-portal/lib/captiveportal/PacketFence/Controller/WirelessProfile.pm b/html/captive-portal/lib/captiveportal/PacketFence/Controller/WirelessProfile.pm index 98c4e3f5d693..5e697246c74a 100644 --- a/html/captive-portal/lib/captiveportal/PacketFence/Controller/WirelessProfile.pm +++ b/html/captive-portal/lib/captiveportal/PacketFence/Controller/WirelessProfile.pm @@ -43,6 +43,9 @@ sub index : Path : Args(0) { my 
$psk; if (isenabled($provisioner->dpsk)) { $psk = $provisioner->generate_dpsk($c->session->{username}); + if (!$psk) { + $psk = "Error generating the PSK, contact the administrator"; + } } else { $psk = $provisioner->passcode; } diff --git a/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Provisioning.pm b/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Provisioning.pm index 09c272ed088c..f91f11b5fa65 100644 --- a/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Provisioning.pm +++ b/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/Provisioning.pm @@ -143,7 +143,13 @@ sub execute_child { $self->done(); } elsif ($self->is_dpsk) { - $self->show_provisioning({psk => $provisioner->generate_dpsk($self->username), ssid => $provisioner->ssid}); + my $psk = $provisioner->generate_dpsk($self->username); + if ($psk) { + $self->show_provisioning({psk => $psk, ssid => $provisioner->ssid}); + } else { + $self->app->flash->{error} = [ "Error trying to generate your PSK. Please contact the administrator." 
]; + $self->show_provisioning(); + } } else { my $result = $provisioner->authorize_enforce($mac); if ($result == 0) { diff --git a/lib/pf/Switch/OpenWiFi.pm b/lib/pf/Switch/OpenWiFi.pm index 3383eaf721dd..0a40244bb0e2 100644 --- a/lib/pf/Switch/OpenWiFi.pm +++ b/lib/pf/Switch/OpenWiFi.pm @@ -59,43 +59,45 @@ sub returnRadiusAccessAccept { } sub find_user_by_psk { - my ($self, $radius_request) = @_; + my ($self, $radius_request, $args) = @_; + my @parts = split(":", $radius_request->{"Called-Station-Id"}); + my $ssid = pop @parts; + my $bssid = join("", @parts); + $bssid =~ s/-//g; + my $cache = $self->cache; + if (exists $args->{'owner'} && $args->{'owner'}->{'pid'} ne "" && exists $args->{'owner'}->{'psk'} && defined $args->{'owner'}->{'psk'} && $args->{'owner'}->{'psk'} ne "") { + if(check_if_radius_request_psk_matches($cache, $radius_request, $args->{'owner'}->{'psk'}, $ssid, $bssid)) { + get_logger->info("PSK matches the pid associated with the mac ".$args->{'owner'}->{'pid'}); + return $args->{'owner'}->{'pid'}; + } + } my ($status, $iter) = pf::dal::person->search( -where => { psk => {'!=' => [-and => '', undef]}, }, + -columns => [qw(pid psk)], + -no_default_join => 1, ); my $matched = 0; my $pid; + # Try first the pid of the mac address while(my $person = $iter->next) { get_logger->debug("User ".$person->{pid}." has a PSK. Checking if it matches the one in the packet"); - if($self->check_if_radius_request_psk_matches($radius_request, $person->{psk})) { + if(check_if_radius_request_psk_matches($cache, $radius_request, $person->{psk}, $ssid, $bssid)) { get_logger->info("PSK matches the one of ".$person->{pid}); - $matched ++; $pid = $person->{pid}; + last; } } - - if($matched > 1) { - get_logger->error("Multiple users use the same PSK. This cannot work with unbound DPSK. 
Ignoring it."); - return undef; - } - else { - return $pid; - } + return $pid; } sub check_if_radius_request_psk_matches { - my ($self, $radius_request, $psk) = @_; - - my @parts = split(":", $radius_request->{"Called-Station-Id"}); - my $ssid = pop @parts; - my $bssid = join("", @parts); - $bssid =~ s/-//g; + my ($cache, $radius_request, $psk, $ssid, $bssid) = @_; - my $pmk = $self->cache->compute( + my $pmk = $cache->compute( "OpenWiFi::check_if_radius_request_psk_matches::PMK::$ssid+$psk", "1 month", sub { pf::util::wpa::calculate_pmk($ssid, $psk) }, diff --git a/lib/pf/Switch/Ruckus/SmartZone.pm b/lib/pf/Switch/Ruckus/SmartZone.pm index dac2a56fa635..9ff7e5be7457 100644 --- a/lib/pf/Switch/Ruckus/SmartZone.pm +++ b/lib/pf/Switch/Ruckus/SmartZone.pm @@ -327,55 +327,65 @@ sub generate_dpsk_attribute_value { sub find_user_by_psk { - my ($self, $radius_request) = @_; + my ($self, $radius_request, $args) = @_; + my $pid; + if($radius_request->{"Ruckus-DPSK-Cipher"} != 4) { + get_logger->error("Ruckus-DPSK-Cipher isn't for WPA2 that uses AES and HMAC-SHA1. 
This isn't supported by this module."); + return $pid; + } + + my $ssid = $radius_request->{'Ruckus-Wlan-Name'}; + my $bssid = pack("H*", pf::util::wpa::strip_hex_prefix($radius_request->{"Ruckus-BSSID"})); + my $username = pack("H*", $radius_request->{'User-Name'}); + my $anonce = pack('H*', pf::util::wpa::strip_hex_prefix($radius_request->{'Ruckus-DPSK-Anonce'})); + my $snonce = pf::util::wpa::snonce_from_eapol_key_frame(pack("H*", pf::util::wpa::strip_hex_prefix($radius_request->{"Ruckus-DPSK-EAPOL-Key-Frame"}))); + my $eapol_key_frame = pack("H*", pf::util::wpa::strip_hex_prefix($radius_request->{"Ruckus-DPSK-EAPOL-Key-Frame"})); + my $cache = $self->cache; + # Try first the pid of the mac address + if (exists $args->{'owner'} && $args->{'owner'}->{'pid'} ne "" && exists $args->{'owner'}->{'psk'} && defined $args->{'owner'}->{'psk'} && $args->{'owner'}->{'psk'} ne "") { + if (check_if_radius_request_psk_matches($cache, $radius_request, $args->{'owner'}->{'psk'}, $ssid, $bssid, $username, $anonce, $snonce, $eapol_key_frame)) { + get_logger->info("PSK matches the pid associated with the mac ".$args->{'owner'}->{'pid'}); + return $args->{'owner'}->{'pid'}; + } + } + my ($status, $iter) = pf::dal::person->search( -where => { psk => {'!=' => [-and => '', undef]}, }, + -columns => [qw(pid psk)], + -no_default_join => 1, ); - my $matched = 0; - my $pid; - while(my $person = $iter->next) { + while (my $person = $iter->next) { get_logger->debug("User ".$person->{pid}." has a PSK. Checking if it matches the one in the packet"); - if($self->check_if_radius_request_psk_matches($radius_request, $person->{psk})) { + if (check_if_radius_request_psk_matches($cache, $radius_request, $person->{psk}, $ssid, $bssid, $username, $anonce, $snonce, $eapol_key_frame)) { get_logger->info("PSK matches the one of ".$person->{pid}); - $matched ++; $pid = $person->{pid}; + last; } } - - if($matched > 1) { - get_logger->error("Multiple users use the same PSK. 
This cannot work with unbound DPSK. Ignoring it."); - return undef; - } - else { - return $pid; - } + return $pid; } sub check_if_radius_request_psk_matches { - my ($self, $radius_request, $psk) = @_; - if($radius_request->{"Ruckus-DPSK-Cipher"} != 4) { - get_logger->error("Ruckus-DPSK-Cipher isn't for WPA2 that uses AES and HMAC-SHA1. This isn't supported by this module."); - return $FALSE; - } + my ($cache, $radius_request, $psk, $ssid, $bssid, $username, $anonce, $snonce, $eapol_key_frame) = @_; - my $pmk = $self->cache->compute( - "Ruckus::SmartZone::check_if_radius_request_psk_matches::PMK::$radius_request->{'Ruckus-Wlan-Name'}+$psk", + my $pmk = $cache->compute( + "Ruckus::SmartZone::check_if_radius_request_psk_matches::PMK::$ssid+$psk", {expires_in => '1 month', expires_variance => '.20'}, - sub { pf::util::wpa::calculate_pmk($radius_request->{"Ruckus-Wlan-Name"}, $psk) }, + sub { pf::util::wpa::calculate_pmk($ssid, $psk) }, ); return pf::util::wpa::match_mic( pf::util::wpa::calculate_ptk( $pmk, - pack("H*", pf::util::wpa::strip_hex_prefix($radius_request->{"Ruckus-BSSID"})), - pack("H*", $radius_request->{"User-Name"}), - pack("H*", pf::util::wpa::strip_hex_prefix($radius_request->{"Ruckus-DPSK-Anonce"})), - pf::util::wpa::snonce_from_eapol_key_frame(pack("H*", pf::util::wpa::strip_hex_prefix($radius_request->{"Ruckus-DPSK-EAPOL-Key-Frame"}))), + $bssid, + $username, + $anonce, + $snonce, ), - pack("H*", pf::util::wpa::strip_hex_prefix($radius_request->{"Ruckus-DPSK-EAPOL-Key-Frame"})), + $eapol_key_frame, ); } diff --git a/lib/pf/provisioner/mobileconfig.pm b/lib/pf/provisioner/mobileconfig.pm index 9096d6644505..cef22eccdfcb 100644 --- a/lib/pf/provisioner/mobileconfig.pm +++ b/lib/pf/provisioner/mobileconfig.pm @@ -344,14 +344,21 @@ sub generate_dpsk { && length($password->{password}) >= 8 ) { get_logger->info("Using password of local user $username for PSK"); - person_modify($username,psk => $password->{password}); - return $password->{password}; + if 
(person_modify($username,psk => $password->{password})) { + return $password->{password}; + } + + return 0; } - elsif (ref($person) eq 'HASH' && defined $person->{psk} && $person->{psk} ne '') { + + if (ref($person) eq 'HASH' && defined $person->{psk} && $person->{psk} ne '') { get_logger->debug("Returning psk key $person->{psk} for user $username"); return $person->{psk}; } - else { + + my $retry = $FALSE; + my $psk; + while(!$retry) { my $psk_size; if ($self->psk_size >= 8) { $psk_size = $self->psk_size; @@ -359,12 +366,13 @@ sub generate_dpsk { $psk_size = 8; get_logger->info("PSK key redefined to 8"); } - my $psk = word(8,$psk_size); - person_modify($username,psk => $psk); - get_logger->info("PSK key has been generated for user ".$username); - get_logger->debug("Returning psk key $psk for user $username"); - return $psk; + $psk = word(8,$psk_size); + $retry = person_modify($username,psk => $psk); } + + get_logger->info("PSK key has been generated for user ".$username); + get_logger->debug("Returning psk key $psk for user $username"); + return $psk; } =head1 AUTHOR diff --git a/lib/pf/radius.pm b/lib/pf/radius.pm index 9026c23379e5..b7432a09b9aa 100644 --- a/lib/pf/radius.pm +++ b/lib/pf/radius.pm @@ -1388,7 +1388,7 @@ sub handleUnboundDPSK { if($profile->unboundDpskEnabled()) { my $accept = $FALSE; - if(my $pid = $switch->find_user_by_psk($radius_request)) { + if(my $pid = $switch->find_user_by_psk($radius_request,$args)) { $logger->info("Unbound DPSK user found $pid. 
Changing this request to use the 802.1x logic"); $connection->isMacAuth($FALSE); $connection->is8021X($TRUE); From 34f9686c4d6cce3f9613f25d7157de80e2e57ade Mon Sep 17 00:00:00 2001 From: Durand Date: Tue, 3 Dec 2024 17:13:06 -0500 Subject: [PATCH 146/176] Fixes for disconnect on Mikrotik (#8418) --- lib/pf/Switch/Mikrotik.pm | 40 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/lib/pf/Switch/Mikrotik.pm b/lib/pf/Switch/Mikrotik.pm index 8ef4cddafa22..429fa4a5fede 100644 --- a/lib/pf/Switch/Mikrotik.pm +++ b/lib/pf/Switch/Mikrotik.pm @@ -37,6 +37,7 @@ sub description { 'Mikrotik' } use pf::Switch::constants; use pf::util; use pf::util::radius qw(perform_disconnect); +use pf::accounting qw(node_accounting_dynauth_attr); =head1 SUBROUTINES @@ -245,12 +246,15 @@ sub radiusDisconnect { try { my $connection_info = $self->radius_deauth_connection_info($send_disconnect_to); - # transforming MAC to the expected format 00:11:22:33:CA:FE - $mac = uc($mac); + #Fetching the acct-session-id + my $dynauth = node_accounting_dynauth_attr($mac); + + $logger->debug("deauthenticate $mac using RADIUS Disconnect-Request deauth method"); # Standard Attributes my $attributes_ref = { - 'User-Name' => "$mac", + 'User-Name' => $dynauth->{'username'}, + 'Acct-Session-Id' => $dynauth->{'acctsessionid'}, }; # merging additional attributes provided by caller to the standard attributes @@ -442,6 +446,36 @@ sub deauthenticateMacSSHDHCP { return 1; } +=item parseRequest + +Takes FreeRADIUS' RAD_REQUEST hash and process it to return +NAS Port type (Ethernet, Wireless, etc.) +Network Device IP +EAP +MAC +NAS-Port (port) +User-Name + +=cut + +sub parseRequest { + my ( $self, $radius_request ) = @_; + + my $client_mac = ref($radius_request->{'Calling-Station-Id'}) eq 'ARRAY' + ? 
clean_mac($radius_request->{'Calling-Station-Id'}[0]) + : clean_mac($radius_request->{'Calling-Station-Id'}); + my $user_name = $self->parseRequestUsername($radius_request); + my $nas_port_type = ( defined($radius_request->{'NAS-Port-Type'}) ? $radius_request->{'NAS-Port-Type'} : ( defined($radius_request->{'Called-Station-SSID'}) ? "Wireless-802.11" : undef ) ); + my $port = $radius_request->{'NAS-Port'}; + my $eap_type = ( exists($radius_request->{'EAP-Type'}) ? $radius_request->{'EAP-Type'} : 0 ); + my $nas_port_id = ( defined($radius_request->{'NAS-Port-Id'}) ? $radius_request->{'NAS-Port-Id'} : undef ); + # Store the radius request if it contains accounting attribute + if (exists($radius_request->{'Acct-Session-Id'})) { + pf::accounting->cache->set($client_mac, $radius_request); + } + return ($nas_port_type, $eap_type, $client_mac, $port, $user_name, $nas_port_id, undef, $nas_port_id); +} + =back =head1 AUTHOR From 25edfa0b1939b668bc186c53383cd8c3a0a5369e Mon Sep 17 00:00:00 2001 From: Durand Date: Tue, 3 Dec 2024 17:16:00 -0500 Subject: [PATCH 147/176] Select the first device that match the MFA method (#8400) * Try to find the device that match the method * Added message in the radius reply * Fixed indentation * Log MFA status in radius reply --- lib/pf/Switch/Aruba.pm | 2 + lib/pf/Switch/Aruba/5400.pm | 2 + lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm | 2 + lib/pf/Switch/Avaya.pm | 2 + lib/pf/Switch/Brocade.pm | 2 + lib/pf/Switch/Cisco.pm | 2 + lib/pf/Switch/Cisco/ASA.pm | 1 + lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm | 2 + lib/pf/Switch/Dell/N1500.pm | 2 + lib/pf/Switch/Extreme.pm | 2 + lib/pf/Switch/F5.pm | 1 + lib/pf/Switch/Fortinet/FortiGate.pm | 1 + lib/pf/Switch/Generic.pm | 2 + lib/pf/Switch/GenericVPN.pm | 1 + lib/pf/Switch/H3C.pm | 2 + lib/pf/Switch/HP.pm | 2 + lib/pf/Switch/Juniper.pm | 2 + lib/pf/Switch/OpenVPN.pm | 1 + lib/pf/Switch/Template.pm | 1 + lib/pf/Switch/Ubiquiti/EdgeSwitch.pm | 2 + lib/pf/Switch/Xirrus.pm | 2 + lib/pf/mfa/Akamai.pm | 105 
+++++++++++++++--------- lib/pf/mfa/TOTP.pm | 22 +++-- lib/pf/radius.pm | 37 +++++---- 24 files changed, 136 insertions(+), 64 deletions(-) diff --git a/lib/pf/Switch/Aruba.pm b/lib/pf/Switch/Aruba.pm index 83390368b9db..4b3998f5edf1 100644 --- a/lib/pf/Switch/Aruba.pm +++ b/lib/pf/Switch/Aruba.pm @@ -630,6 +630,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Class'} = 'root'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -650,6 +651,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Class'} = 'read-only'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Aruba/5400.pm b/lib/pf/Switch/Aruba/5400.pm index 62987fb898a9..dcc751b93334 100644 --- a/lib/pf/Switch/Aruba/5400.pm +++ b/lib/pf/Switch/Aruba/5400.pm @@ -79,6 +79,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Service-Type'} = 'Administrative-User'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -99,6 +100,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Service-Type'} = 'NAS-Prompt-User'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm b/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm index 2bfb05d17905..39991102ebe3 100644 --- a/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm +++ b/lib/pf/Switch/Aruba/ArubaOS_CX_10_x.pm @@ -85,6 +85,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Service-Type'} = 'Administrative-User'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -105,6 +106,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Service-Type'} = 'NAS-Prompt-User'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Avaya.pm b/lib/pf/Switch/Avaya.pm index 9be5031c0384..6a39557ddaaa 100644 --- a/lib/pf/Switch/Avaya.pm +++ b/lib/pf/Switch/Avaya.pm @@ -652,6 +652,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Service-Type'} = '7'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); @@ -672,6 +673,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Service-Type'} = '6'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); diff --git a/lib/pf/Switch/Brocade.pm b/lib/pf/Switch/Brocade.pm index 4acedbc5d195..c283103861a4 100644 --- a/lib/pf/Switch/Brocade.pm +++ b/lib/pf/Switch/Brocade.pm @@ -317,6 +317,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Foundry-Privilege-Level'} = '0'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -338,6 +339,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Foundry-Privilege-Level'} = '5'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Cisco.pm b/lib/pf/Switch/Cisco.pm index cf4fbb46a722..ebdab0c90858 100644 --- a/lib/pf/Switch/Cisco.pm +++ b/lib/pf/Switch/Cisco.pm @@ -1599,6 +1599,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Cisco-AVPair'} = 'shell:priv-lvl=15'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -1620,6 +1621,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Cisco-AVPair'} = 'shell:priv-lvl=3'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Cisco/ASA.pm b/lib/pf/Switch/Cisco/ASA.pm index 51fcfdefec79..8e133e2bf9a3 100644 --- a/lib/pf/Switch/Cisco/ASA.pm +++ b/lib/pf/Switch/Cisco/ASA.pm @@ -157,6 +157,7 @@ sub returnAuthorizeVPN { my $status = shift @super_reply; my %radius_reply = @super_reply; my $radius_reply_ref = \%radius_reply; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'} if exists $args->{'message'}; return [$status, %$radius_reply_ref] if($status == $RADIUS::RLM_MODULE_USERLOCK); my $role; diff --git a/lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm b/lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm index ae93a9fc7821..50754c5b146d 100644 --- a/lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm +++ b/lib/pf/Switch/Cisco/Cisco_WLC_AireOS.pm @@ -363,6 +363,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Service-Type'} = 'Administrative-User'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -384,6 +385,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Service-Type'} = 'NAS-Prompt-User'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Dell/N1500.pm b/lib/pf/Switch/Dell/N1500.pm index 234870fd91cf..e07c60ef778a 100644 --- a/lib/pf/Switch/Dell/N1500.pm +++ b/lib/pf/Switch/Dell/N1500.pm @@ -123,6 +123,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Cisco-AVPair'} = 'shell:priv-lvl=15'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -144,6 +145,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Cisco-AVPair'} = 'shell:priv-lvl=3'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Extreme.pm b/lib/pf/Switch/Extreme.pm index c0b715e3454c..054980a8228e 100644 --- a/lib/pf/Switch/Extreme.pm +++ b/lib/pf/Switch/Extreme.pm @@ -1535,6 +1535,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Service-Type'} = '0'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); @@ -1555,6 +1556,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Service-Type'} = '6'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); diff --git a/lib/pf/Switch/F5.pm b/lib/pf/Switch/F5.pm index 94059a2b9512..497a06001b99 100644 --- a/lib/pf/Switch/F5.pm +++ b/lib/pf/Switch/F5.pm @@ -176,6 +176,7 @@ sub returnAuthorizeVPN { my $radius_reply_ref = {}; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'} if exists $args->{'message'}; my $status; # should this node be kicked out? my $kick = $self->handleRadiusDeny($args); diff --git a/lib/pf/Switch/Fortinet/FortiGate.pm b/lib/pf/Switch/Fortinet/FortiGate.pm index da54d325c5dd..069418fd2649 100644 --- a/lib/pf/Switch/Fortinet/FortiGate.pm +++ b/lib/pf/Switch/Fortinet/FortiGate.pm @@ -271,6 +271,7 @@ sub returnAuthorizeVPN { my $radius_reply_ref = {}; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'} if exists $args->{'message'}; my $status; # should this node be kicked out? 
my $kick = $self->handleRadiusDeny($args); diff --git a/lib/pf/Switch/Generic.pm b/lib/pf/Switch/Generic.pm index 0c73771d168d..8152e0598b3d 100644 --- a/lib/pf/Switch/Generic.pm +++ b/lib/pf/Switch/Generic.pm @@ -47,6 +47,7 @@ sub returnAuthorizeWrite { my $radius_reply_ref; my $status; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -67,6 +68,7 @@ sub returnAuthorizeRead { my $radius_reply_ref; my $status; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/GenericVPN.pm b/lib/pf/Switch/GenericVPN.pm index 4963c56a279b..e6868f415a62 100644 --- a/lib/pf/Switch/GenericVPN.pm +++ b/lib/pf/Switch/GenericVPN.pm @@ -78,6 +78,7 @@ sub returnAuthorizeVPN { my $radius_reply_ref = {}; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'} if exists $args->{'message'}; my $status; # should this node be kicked out? 
my $kick = $self->handleRadiusDeny($args); diff --git a/lib/pf/Switch/H3C.pm b/lib/pf/Switch/H3C.pm index 7bc9245dca0d..d5ccc9922831 100644 --- a/lib/pf/Switch/H3C.pm +++ b/lib/pf/Switch/H3C.pm @@ -200,6 +200,7 @@ sub returnAuthorizeWrite { my $radius_reply_ref; my $status; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -220,6 +221,7 @@ sub returnAuthorizeRead { my $radius_reply_ref; my $status; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/HP.pm b/lib/pf/Switch/HP.pm index 41dc08abac7a..887a5f643a8b 100644 --- a/lib/pf/Switch/HP.pm +++ b/lib/pf/Switch/HP.pm @@ -524,6 +524,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Service-Type'} = 'Administrative-User'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -544,6 +545,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Service-Type'} = 'NAS-Prompt-User'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Juniper.pm b/lib/pf/Switch/Juniper.pm index ed5c39595a52..a8128a25ee23 100644 --- a/lib/pf/Switch/Juniper.pm +++ b/lib/pf/Switch/Juniper.pm @@ -73,6 +73,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Juniper-Local-User-Name'} = 'super-user'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -93,6 +94,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Juniper-Local-User-Name'} = 'read-only'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/OpenVPN.pm b/lib/pf/Switch/OpenVPN.pm index 0412eaa8a076..11af483f7772 100644 --- a/lib/pf/Switch/OpenVPN.pm +++ b/lib/pf/Switch/OpenVPN.pm @@ -98,6 +98,7 @@ sub returnAuthorizeVPN { my $radius_reply_ref = {}; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'} if exists $args->{'message'}; my $status; # should this node be kicked out? my $kick = $self->handleRadiusDeny($args); diff --git a/lib/pf/Switch/Template.pm b/lib/pf/Switch/Template.pm index aa1da4270b94..eed19a71e088 100644 --- a/lib/pf/Switch/Template.pm +++ b/lib/pf/Switch/Template.pm @@ -570,6 +570,7 @@ sub returnCliAuthorize { %radius_reply = @$attrs; } else { $radius_reply{'Reply-Message'} = "Switch $accessType access granted by PacketFence"; + $radius_reply{'Reply-Message'} = $args->{'message'}." . ".$radius_reply{'Reply-Message'} if exists $args->{'message'}; } $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with $accessType access"); diff --git a/lib/pf/Switch/Ubiquiti/EdgeSwitch.pm b/lib/pf/Switch/Ubiquiti/EdgeSwitch.pm index 92ba0387d0bb..d14c19378517 100644 --- a/lib/pf/Switch/Ubiquiti/EdgeSwitch.pm +++ b/lib/pf/Switch/Ubiquiti/EdgeSwitch.pm @@ -110,6 +110,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Cisco-AVPair'} = 'shell:priv-lvl=15'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -131,6 +132,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Cisco-AVPair'} = 'shell:priv-lvl=3'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/Switch/Xirrus.pm b/lib/pf/Switch/Xirrus.pm index bbb0344b47bf..43a58fec865a 100644 --- a/lib/pf/Switch/Xirrus.pm +++ b/lib/pf/Switch/Xirrus.pm @@ -299,6 +299,7 @@ sub returnAuthorizeWrite { my $status; $radius_reply_ref->{'Xirrus-Admin-Role'} = 'read-write'; $radius_reply_ref->{'Reply-Message'} = "Switch enable access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . ".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with write access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeWrite', $args); @@ -320,6 +321,7 @@ sub returnAuthorizeRead { my $status; $radius_reply_ref->{'Xirrus-Admin-Role'} = 'read-only'; $radius_reply_ref->{'Reply-Message'} = "Switch read access granted by PacketFence"; + $radius_reply_ref->{'Reply-Message'} = $args->{'message'}." . 
".$radius_reply_ref->{'Reply-Message'} if exists $args->{'message'}; $logger->info("User $args->{'user_name'} logged in $args->{'switch'}{'_id'} with read access"); my $filter = pf::access_filter::radius->new; my $rule = $filter->test('returnAuthorizeRead', $args); diff --git a/lib/pf/mfa/Akamai.pm b/lib/pf/mfa/Akamai.pm index ce51e6dc6095..3c54e53ac354 100644 --- a/lib/pf/mfa/Akamai.pm +++ b/lib/pf/mfa/Akamai.pm @@ -113,7 +113,7 @@ our %METHOD_ALIAS =( our %METHOD_LOOKUP =( "push" => "push", - "sms" => "sms_otp", + "sms" => "text_otp", "phone" => "call_otp" ); @@ -127,28 +127,30 @@ sub check_user { my ($self, $username, $otp, $device) = @_; my $logger = get_logger(); my ($devices, $error) = $self->_get_curl("/api/v1/verify/check_user?username=$username"); - + my $message; if ($error == 1) { - $logger->error("Not able to fetch the devices"); - return $FALSE; + $message = "Not able to fetch the devices for user $username"; + $logger->error($message); + return $FALSE, $message; } if (exists($devices->{'result'}->{'policy_decision'})) { if ($devices->{'result'}->{'policy_decision'} eq "bypass") { - $logger->info("Policy decision is bypass, allow access"); - return $TRUE; + $message = "Policy decision is bypass, allow access for user $username"; + $logger->info($message); + return $TRUE, $message; } if ($devices->{'result'}->{'policy_decision'} ne "authenticate_user") { - $logger->error($devices->{'result'}->{'policy_decision'}); - return $FALSE; + $message = $devices->{'result'}->{'policy_decision'}." 
for user ".$username; + $logger->error($message); + return $FALSE, $message; } } - my @default_device; if (defined($device)) { - @default_device = grep { $_->{'device'} eq $device } @{$devices->{'result'}->{'devices'}}; + @default_device = grep { $_->{'device'} eq $device } @{$devices->{'result'}->{'devices'}}; } else { - @default_device = grep { $_->{'default'} eq "true" } @{$devices->{'result'}->{'devices'}}; + @default_device = $self->select_phone($devices->{'result'}->{'devices'}, $self->radius_mfa_method, undef); } if ($self->radius_mfa_method eq 'push') { @@ -162,40 +164,50 @@ sub check_user { if ( grep $_ eq 'totp', @{$default_device[0]->{'methods'}}) { return $ACTIONS{'totp'}->($self,$default_device[0]->{'device'},$username,$otp,$devices); } else { - $logger->info("Unsupported method totp on device ".$default_device[0]->{'name'}); - return $FALSE; + @default_device = $self->select_phone($devices->{'result'}->{'devices'}, 'totp', undef); + if (!@default_device) { + $message = "No totp support method on device any devices for user $username"; + $logger->info($username); + return $FALSE, $message; + } + return $ACTIONS{'totp'}->($self,$default_device[0]->{'device'},$username,$otp,$devices); } } elsif ($otp =~ /^\d{8,8}$/) { $logger->info("OTP Verification"); return $ACTIONS{'check_auth'}->($self,$default_device[0]->{'device'},$username,$otp,$devices); } elsif ($otp =~ /^(sms|push|phone)(\d?)$/i) { - my @device = $self->select_phone($devices->{'result'}->{'devices'}, $2); my $method = $1; + my @device = $self->select_phone($devices->{'result'}->{'devices'}, $method, $2); foreach my $device (@device) { if ( grep $_ =~ $METHOD_ALIAS{$method}, @{$device->{'methods'}}) { return $ACTIONS{$method}->($self,$device->{'device'},$username,$1,$devices); } else { - $logger->info("Unsupported method on device ".$device->{'name'}); - return $FALSE; + $message = "Unsupported method on device ".$device->{'name'}." 
for user ".$username; + $logger->info($message); + return $FALSE, $message; } } } else { - $logger->info("Method not supported"); - return $FALSE; + $message = "Method not supported for user $username"; + $logger->info($message); + return $FALSE, $message; } } elsif ($self->radius_mfa_method eq 'sms' || $self->radius_mfa_method eq 'phone') { - my @device = $self->select_phone($devices->{'result'}->{'devices'}, undef); + my @device = $self->select_phone($devices->{'result'}->{'devices'}, $self->radius_mfa_method, undef); foreach my $device (@device) { if ( grep $_ =~ $METHOD_ALIAS{$self->radius_mfa_method}, @{$device->{'methods'}}) { return $ACTIONS{$self->radius_mfa_method}->($self,$device->{'device'},$username,$self->radius_mfa_method); } else { - $logger->info("Unsupported method on device ".$device->{'name'}); - return $FALSE; + $message = "Unsupported method on device ".$device->{'name'}." for user ".$username; + $logger->info($message); + return $FALSE, $message; } } } else { - $logger->error("OTP is empty"); - return $FALSE; + $message = "OTP is empty for user ".$username; + $logger->error($message); + + return $FALSE, $message; } } } @@ -207,7 +219,7 @@ Select the phone to trigger the MFA =cut sub select_phone { - my ($self, $devices, $phone_id) = @_; + my ($self, $devices, $method, $phone_id) = @_; my $logger = get_logger(); my @device; if (defined($phone_id) && $phone_id ne "") { @@ -218,8 +230,7 @@ sub select_phone { # Return the n-1 phone @device = @{$devices}[$phone_id-1]; } else { - # Return the default phone - @device = grep { $_->{'default'} == 1 } @{$devices}; + @device = grep { grep { $_ eq $METHOD_LOOKUP{$method} } @{$_->{'methods'}} } @{$devices}; } return @device; } @@ -233,6 +244,7 @@ totp method sub totp { my ($self, $device, $username, $otp, $devices) = @_; my $logger = get_logger(); + my $message; my $method = "offline_otp"; if (length($otp) == 16) { $method = "bypass_code"; @@ -241,14 +253,17 @@ sub totp { my $post_fields = 
encode_json({device => $device, method => { $method => {"code" => $otp} } , username => $username}); my ($auth, $error) = $self->_post_curl("/api/v1/verify/start_auth", $post_fields); if ($error) { - return $FALSE; + $message = "Error trigger $method for user $username on $device"; + return $FALSE, $message; } if ($auth->{'result'}->{'status'} eq 'allow') { - $logger->info("Authentication sucessfull on Akamai MFA"); - return $TRUE; + $message = "Authentication sucessfull on Akamai MFA for $username"; + $logger->info($message); + return $TRUE, $message; } - $logger->info("Authentication denied on Akamai MFA, reason: ". $auth->{'result'}->{'status'}->{'deny'}->{'reason'}); - return $FALSE; + $message = "Authentication denied on Akamai MFA, reason: ". $auth->{'result'}->{'status'}->{'deny'}->{'reason'}; + $logger->info($message); + return $FALSE, $message; } =head2 generic_method @@ -260,6 +275,7 @@ generic method sub generic_method { my ($self, $device, $username, $method) =@_; my $logger = get_logger(); + my $message; $logger->info("Trigger $method for user $username"); my $post_fields = encode_json({device => $device, method => $METHOD_LOOKUP{$method}, username => $username}); my ($auth, $error)= cache->compute($device.$METHOD_LOOKUP{$method}, {expires_in => normalize_time($self->cache_duration)}, sub { @@ -267,7 +283,8 @@ sub generic_method { } ); if ($error) { - return $FALSE; + $message = "Error triggering $method for user $username"; + return $FALSE, $message; } # Cache the method to fetch it on the 2nd radius request (TODO: cache expiration should be in config). if (!cache->get($username)) { @@ -279,7 +296,7 @@ sub generic_method { } # Remove the authenticated status of the user since the next radius requests will use OTP cache->remove($username." 
authenticated"); - return $FALSE; + return $FALSE, "Authentication rejected for user $username, expect a new request with OTP"; } =head2 push @@ -298,19 +315,19 @@ sub push { } ); if ($error) { - return + return $FALSE, "Error trigerring the push for user $username"; } my $i = 0; while($TRUE) { my ($answer, $error) = $self->_get_curl("/api/v1/verify/check_auth?tx=".$auth->{'result'}->{'tx'}); return $FALSE if $error; if ($answer->{'result'} eq 'allow') { - return $TRUE; + return $TRUE, "Push succeeded for user $username"; } sleep(5); last if ($i++ == 6); } - return $FALSE; + return $FALSE , "Push failed for user $username"; } =head2 @@ -322,15 +339,23 @@ check_auth sub check_auth { my ($self, $device, $username, $otp, $devices) = @_; my $logger = get_logger(); + my $message; if (my $infos = cache->get($username)) { my $post_fields = encode_json({tx => $infos->{'tx'}, user_input => $otp}); my ($return, $error) = $self->_get_curl("/api/v1/verify/check_auth?tx=".$infos->{'tx'}."&user_input=".$otp); - return $FALSE if $error; + if ($error) { + $message = "Error trying to verify the OTP code for user $username"; + $logger->error($message); + return $FALSE, $message; + } if ($return->{'result'} eq 'allow') { - $logger->info("Authentication successfull"); - return $TRUE; + $message = "Authentication successfull for user $username"; + $logger->info($message); + return $TRUE, $message; } else { - return $FALSE; + $message = "Authentication failed for user $username"; + $logger->error($message); + return $FALSE, $message; + } } else { foreach my $device (@{$devices->{'result'}->{'devices'}}) { diff --git a/lib/pf/mfa/TOTP.pm b/lib/pf/mfa/TOTP.pm index f2293f0aeb78..f70f56142c21 100644 --- a/lib/pf/mfa/TOTP.pm +++ b/lib/pf/mfa/TOTP.pm @@ -50,12 +50,14 @@ Get the devices of the user sub check_user { my ($self, $username, $otp, $device) = @_; my $logger = get_logger(); + my $message; if ($self->radius_mfa_method eq 'strip-otp' || $self->radius_mfa_method eq 'second-password') { if 
($otp =~ /^\d{6,6}$/) { return $self->verify_otp($username, $otp); } else { - $logger->warn("Method not supported"); - return $FALSE; + $message = "Method not supported for user $username"; + $logger->warn($message); + return $FALSE, $message; } } } @@ -63,19 +65,23 @@ sub check_user { sub verify_otp { my ($self, $username, $otp) = @_; my $logger = get_logger(); + my $message; my $person = person_view($username); if (defined $person->{otp} && $person->{otp} ne '') { my $local_otp = $self->generateCurrentNumber($person->{otp}); if ($otp == $local_otp) { $self->set_mfa_success($username); - $logger->info("OTP token match"); - return $TRUE; + $message = "OTP token match for user $username"; + $logger->info($message); + return $TRUE, $message; } - $logger->info("OTP token doesnt match"); - return $FALSE; + $message = "OTP token doesnt match for user $username"; + $logger->info($message); + return $FALSE, $message; } - $logger->info("The user who try to authenticate hasn't enrolled"); - return $FALSE; + $message = "The user who try to authenticate hasn't enrolled"; + $logger->info($message); + return $FALSE, $message; } sub generateCurrentNumber { diff --git a/lib/pf/radius.pm b/lib/pf/radius.pm index b7432a09b9aa..c477773b78f6 100644 --- a/lib/pf/radius.pm +++ b/lib/pf/radius.pm @@ -913,7 +913,7 @@ sub vpn { my $return = $self->mfa_pre_auth($args, $options, $sources, $extra, $otp, $password); return $return if (ref($return) eq 'ARRAY'); - + $args->{'message'} = $return; if (defined($mac)) { Log::Log4perl::MDC->put( 'mac', $mac ); my $role_obj = new pf::role::custom(); @@ -979,7 +979,7 @@ sub vpn { } $return = $self->mfa_post_auth($args, $options, $sources, $source_id, $extra ,$otp, $password); return $return if (ref($return) eq 'ARRAY'); - + $args->{'message'} = $return; return $self->returnRadiusVpn($args, $options, $sources, $source_id, $extra); } @@ -994,8 +994,8 @@ sub cli { my $source_id = \@$sources; my $return = $self->mfa_pre_auth($args, $options, $sources, 
$extra, $otp, $password); return $return if (ref($return) eq 'ARRAY'); - - return $self->returnRadiusCli($args, $options, $sources, $source_id, $extra) if $return eq $TRUE; + $args->{'message'} = $return; + return $self->returnRadiusCli($args, $options, $sources, $source_id, $extra) if $return; if (!defined($args->{'radius_request'}->{'MS-CHAP-Challenge'}) && ( !exists($args->{'radius_request'}->{"EAP-Type"}) || ( exists($args->{'radius_request'}->{"EAP-Type"}) && $args->{'radius_request'}->{"EAP-Type"} != $EAP_TLS && $args->{'radius_request'}->{"EAP-Type"} != $MS_EAP_AUTHENTICATION ) ) ) { my $return = $self->authenticate($args, $sources, \$source_id, $extra, $otp, $password); @@ -1004,7 +1004,7 @@ sub cli { $return = $self->mfa_post_auth($args, $options, $sources, $source_id, $extra ,$otp, $password); return $return if (ref($return) eq 'ARRAY'); - + $args->{'message'} = $return; return $self->returnRadiusCli($args, $options, $sources, $source_id, $extra); } @@ -1081,12 +1081,14 @@ sub mfa_post_auth { my $cache = pf::mfa->cache; if (!$cache->get($args->{'radius_request'}->{'User-Name'}." authenticated")) { $cache->set($args->{'radius_request'}->{'User-Name'}." authenticated", $TRUE, normalize_time($mfa->cache_duration)); + return "Authenticated, waiting for the OTP code"; } } else { - my $result = $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$otp); + my ($result, $message) = $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$otp); if ($result != $TRUE) { - return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => "Multi-Factor Authentication failed or triggered") ]; + return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => $message) ]; } + return $message; } } } @@ -1130,14 +1132,15 @@ sub mfa_pre_auth { if ($mfa->radius_mfa_method eq 'strip-otp' || $mfa->radius_mfa_method eq 'sms' || $mfa->radius_mfa_method eq 'phone') { # Previously did a authentication request ? 
if (my $infos = $cache->get($args->{'radius_request'}->{'User-Name'})) { - my $result = $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$password, $infos->{'device'}); + my ($result, $message)= $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$password, $infos->{'device'}); if ($result != $TRUE) { - return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => "MFA verification failed") ]; + return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => $message) ]; } else { if ($caller eq "pf::radius::vpn") { + $args->{'message'} = $message; return $self->returnRadiusVpn($args, $options, $sources, $source_id, $extra); } else { - return $TRUE; + return $message; } } } @@ -1147,26 +1150,28 @@ sub mfa_pre_auth { } elsif ($mfa->radius_mfa_method eq 'second-password') { if (my $authenticated = $cache->get($args->{'radius_request'}->{'User-Name'}." authenticated")) { if ($authenticated) { - my $result = $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$password); + my ($result, $message) = $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$password); if ($result != $TRUE) { - return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => "MFA verification failed")]; + return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => $message)]; } else { if ($caller eq "pf::radius::vpn") { + $args->{'message'} = $message; return $self->returnRadiusVpn($args, $options, $sources, $source_id, $extra); } else { - return $TRUE; + return $message; } } } else { my $device = $cache->get($args->{'radius_request'}->{'User-Name'}); - my $result = $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$password, $device); + my ($result, $message) = $mfa->check_user($args->{'radius_request'}->{'User-Name'}, $$password, $device); if ($result != $TRUE) { - return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => "MFA verification failed") ]; + return [ $RADIUS::RLM_MODULE_FAIL, ('Reply-Message' => $message) ]; } else { if ($caller eq "pf::radius::vpn") { + 
$args->{'message'} = $message; return $self->returnRadiusVpn($args, $options, $sources, $source_id, $extra); } else { - return $TRUE; + return $message; } } } From e621d2ca87c3233d1b82c0a0202769857875ba52 Mon Sep 17 00:00:00 2001 From: Durand Date: Tue, 3 Dec 2024 17:17:51 -0500 Subject: [PATCH 148/176] Updated db connection (#8419) --- go/cmd/pfdhcp/api.go | 56 ++++++++++++++++++--------------- go/cmd/pfdhcp/config.go | 11 ++++--- go/cmd/pfdhcp/keysoption.go | 20 ++++++------ go/cmd/pfdhcp/main.go | 59 +++++++++++++++++------------------ go/cmd/pfdhcp/server.go | 3 +- go/cmd/pfdhcp/utils.go | 35 +++++++++++---------- go/cmd/pfdhcp/workers_pool.go | 4 ++- 7 files changed, 98 insertions(+), 90 deletions(-) diff --git a/go/cmd/pfdhcp/api.go b/go/cmd/pfdhcp/api.go index f640f46265a9..7a8058fbe05a 100644 --- a/go/cmd/pfdhcp/api.go +++ b/go/cmd/pfdhcp/api.go @@ -1,12 +1,12 @@ package main import ( + "database/sql" "encoding/binary" "encoding/json" "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "strconv" @@ -21,6 +21,10 @@ import ( "github.com/inverse-inc/packetfence/go/pfconfigdriver" ) +type API struct { + DB *sql.DB +} + // Node struct type Node struct { Mac string `json:"mac"` @@ -82,7 +86,7 @@ type OptionsFromFilter struct { Type string `json:"type"` } -func handleIP2Mac(res http.ResponseWriter, req *http.Request) { +func (a *API) handleIP2Mac(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) if index, expiresAt, found := GlobalIPCache.GetWithExpiration(vars["ip"]); found { @@ -102,7 +106,7 @@ func handleIP2Mac(res http.ResponseWriter, req *http.Request) { return } -func handleMac2Ip(res http.ResponseWriter, req *http.Request) { +func (a *API) handleMac2Ip(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) if index, expiresAt, found := GlobalMacCache.GetWithExpiration(vars["mac"]); found { @@ -122,7 +126,7 @@ func handleMac2Ip(res http.ResponseWriter, req *http.Request) { return } -func handleAllStats(res http.ResponseWriter, 
req *http.Request) { +func (a *API) handleAllStats(res http.ResponseWriter, req *http.Request) { var result Items var interfaces pfconfigdriver.ListenInts pfconfigdriver.FetchDecodeSocket(ctx, &interfaces) @@ -132,7 +136,7 @@ func handleAllStats(res http.ResponseWriter, req *http.Request) { } for _, i := range interfaces.Element { if h, ok := intNametoInterface[i]; ok { - stat := h.handleAPIReq(APIReq{Req: "stats", NetInterface: i, NetWork: ""}) + stat := h.handleAPIReq(APIReq{Req: "stats", NetInterface: i, NetWork: ""}, a.DB) for _, s := range stat.([]Stats) { result.Items = append(result.Items, s) } @@ -151,11 +155,11 @@ func handleAllStats(res http.ResponseWriter, req *http.Request) { return } -func handleStats(res http.ResponseWriter, req *http.Request) { +func (a *API) handleStats(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) if h, ok := intNametoInterface[vars["int"]]; ok { - stat := h.handleAPIReq(APIReq{Req: "stats", NetInterface: vars["int"], NetWork: vars["network"]}) + stat := h.handleAPIReq(APIReq{Req: "stats", NetInterface: vars["int"], NetWork: vars["network"]}, a.DB) outgoingJSON, err := json.Marshal(stat) @@ -172,11 +176,11 @@ func handleStats(res http.ResponseWriter, req *http.Request) { return } -func handleDuplicates(res http.ResponseWriter, req *http.Request) { +func (a *API) handleDuplicates(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) if h, ok := intNametoInterface[vars["int"]]; ok { - stat := h.handleAPIReq(APIReq{Req: "duplicates", NetInterface: vars["int"], NetWork: vars["network"]}) + stat := h.handleAPIReq(APIReq{Req: "duplicates", NetInterface: vars["int"], NetWork: vars["network"]}, a.DB) outgoingJSON, err := json.Marshal(stat) @@ -193,11 +197,11 @@ func handleDuplicates(res http.ResponseWriter, req *http.Request) { return } -func handleDebug(res http.ResponseWriter, req *http.Request) { +func (a *API) handleDebug(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) if h, ok := 
intNametoInterface[vars["int"]]; ok { - stat := h.handleAPIReq(APIReq{Req: "debug", NetInterface: vars["int"], Role: vars["role"]}) + stat := h.handleAPIReq(APIReq{Req: "debug", NetInterface: vars["int"], Role: vars["role"]}, a.DB) outgoingJSON, err := json.Marshal(stat) @@ -213,7 +217,7 @@ func handleDebug(res http.ResponseWriter, req *http.Request) { return } -func handleReleaseIP(res http.ResponseWriter, req *http.Request) { +func (a *API) handleReleaseIP(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) _ = InterfaceScopeFromMac(vars["mac"]) @@ -226,11 +230,11 @@ func handleReleaseIP(res http.ResponseWriter, req *http.Request) { } } -func handleOverrideOptions(res http.ResponseWriter, req *http.Request) { +func (a *API) handleOverrideOptions(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) - body, err := ioutil.ReadAll(io.LimitReader(req.Body, 1048576)) + body, err := io.ReadAll(io.LimitReader(req.Body, 1048576)) if err != nil { panic(err) } @@ -239,7 +243,7 @@ func handleOverrideOptions(res http.ResponseWriter, req *http.Request) { } // Insert information in MySQL - _ = MysqlInsert(vars["mac"], sharedutils.ConvertToString(body)) + _ = MysqlInsert(vars["mac"], sharedutils.ConvertToString(body), a.DB) var result = &Info{Mac: vars["mac"], Status: "ACK"} @@ -250,11 +254,11 @@ func handleOverrideOptions(res http.ResponseWriter, req *http.Request) { } } -func handleOverrideNetworkOptions(res http.ResponseWriter, req *http.Request) { +func (a *API) handleOverrideNetworkOptions(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) - body, err := ioutil.ReadAll(io.LimitReader(req.Body, 1048576)) + body, err := io.ReadAll(io.LimitReader(req.Body, 1048576)) if err != nil { panic(err) } @@ -263,7 +267,7 @@ func handleOverrideNetworkOptions(res http.ResponseWriter, req *http.Request) { } // Insert information in MySQL - _ = MysqlInsert(vars["network"], sharedutils.ConvertToString(body)) + _ = MysqlInsert(vars["network"], 
sharedutils.ConvertToString(body), a.DB) var result = &Info{Network: vars["network"], Status: "ACK"} @@ -274,13 +278,13 @@ func handleOverrideNetworkOptions(res http.ResponseWriter, req *http.Request) { } } -func handleRemoveOptions(res http.ResponseWriter, req *http.Request) { +func (a *API) handleRemoveOptions(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) var result = &Info{Mac: vars["mac"], Status: "ACK"} - err := MysqlDel(vars["mac"]) + err := MysqlDel(vars["mac"], a.DB) if !err { result = &Info{Mac: vars["mac"], Status: "NAK"} } @@ -291,13 +295,13 @@ func handleRemoveOptions(res http.ResponseWriter, req *http.Request) { } } -func handleRemoveNetworkOptions(res http.ResponseWriter, req *http.Request) { +func (a *API) handleRemoveNetworkOptions(res http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) var result = &Info{Network: vars["network"], Status: "ACK"} - err := MysqlDel(vars["network"]) + err := MysqlDel(vars["network"], a.DB) if !err { result = &Info{Network: vars["network"], Status: "NAK"} } @@ -308,9 +312,9 @@ func handleRemoveNetworkOptions(res http.ResponseWriter, req *http.Request) { } } -func decodeOptions(b string) (map[dhcp.OptionCode][]byte, error) { +func decodeOptions(b string, db *sql.DB) (map[dhcp.OptionCode][]byte, error) { var options []Options - _, value := MysqlGet(b) + _, value := MysqlGet(b, db) decodedValue := sharedutils.ConvertToByte(value) var dhcpOptions = make(map[dhcp.OptionCode][]byte) if err := json.Unmarshal(decodedValue, &options); err != nil { @@ -360,7 +364,7 @@ func extractMembers(v Network) ([]Node, []string, int) { return Members, Macs, Count } -func (h *Interface) handleAPIReq(Request APIReq) interface{} { +func (h *Interface) handleAPIReq(Request APIReq, db *sql.DB) interface{} { var stats []Stats if Request.Req == "duplicates" { @@ -399,7 +403,7 @@ func (h *Interface) handleAPIReq(Request APIReq) interface{} { } // Add network options on the fly - x, err := 
decodeOptions(v.network.IP.String()) + x, err := decodeOptions(v.network.IP.String(), db) if err == nil { for key, value := range x { Options[key.String()] = Tlv.Tlvlist[int(key)].Transform.String(value) diff --git a/go/cmd/pfdhcp/config.go b/go/cmd/pfdhcp/config.go index 17fd4a038294..d4ced741d412 100644 --- a/go/cmd/pfdhcp/config.go +++ b/go/cmd/pfdhcp/config.go @@ -1,6 +1,7 @@ package main import ( + "database/sql" "encoding/binary" "math" "net" @@ -68,7 +69,7 @@ func newDHCPConfig() *Interfaces { return &p } -func (d *Interfaces) readConfig() { +func (d *Interfaces) readConfig(MyDB *sql.DB) { interfaces := pfconfigdriver.GetType[pfconfigdriver.ListenInts](ctx) DHCPinterfaces := pfconfigdriver.GetType[pfconfigdriver.DHCPInts](ctx) portal := pfconfigdriver.GetType[pfconfigdriver.PfConfCaptivePortal](ctx) @@ -246,7 +247,7 @@ func (d *Interfaces) readConfig() { } DHCPScope.dstIp = dstReplyIp // Initialize dhcp pool - available, _ := pool.Create(ctx, backend, uint64(dhcp.IPRange(ip, ips)), DHCPNet.network.IP.String()+Role, algorithm, StatsdClient, MySQLdatabase) + available, _ := pool.Create(ctx, backend, uint64(dhcp.IPRange(ip, ips)), DHCPNet.network.IP.String()+Role, algorithm, StatsdClient, MyDB) DHCPScope.available = available @@ -267,7 +268,7 @@ func (d *Interfaces) readConfig() { DHCPScope.xid = xid wg.Add(1) go func() { - initiaLease(DHCPScope, ConfNet) + initiaLease(DHCPScope, ConfNet, MyDB) wg.Done() }() var options = make(map[dhcp.OptionCode][]byte) @@ -326,7 +327,7 @@ func (d *Interfaces) readConfig() { } DHCPScope.dstIp = dstReplyIp // Initialize dhcp pool - available, _ := pool.Create(ctx, backend, uint64(dhcp.IPRange(net.ParseIP(ConfNet.DhcpStart), net.ParseIP(ConfNet.DhcpEnd))), DHCPNet.network.IP.String(), algorithm, StatsdClient, MySQLdatabase) + available, _ := pool.Create(ctx, backend, uint64(dhcp.IPRange(net.ParseIP(ConfNet.DhcpStart), net.ParseIP(ConfNet.DhcpEnd))), DHCPNet.network.IP.String(), algorithm, StatsdClient, MyDB) DHCPScope.available 
= available @@ -347,7 +348,7 @@ func (d *Interfaces) readConfig() { DHCPScope.xid = xid wg.Add(1) go func() { - initiaLease(DHCPScope, ConfNet) + initiaLease(DHCPScope, ConfNet, MyDB) wg.Done() }() diff --git a/go/cmd/pfdhcp/keysoption.go b/go/cmd/pfdhcp/keysoption.go index 4d2619413d77..9fabf1a54dbf 100644 --- a/go/cmd/pfdhcp/keysoption.go +++ b/go/cmd/pfdhcp/keysoption.go @@ -1,16 +1,18 @@ package main import ( + "database/sql" + "github.com/inverse-inc/go-utils/log" ) // MysqlInsert function -func MysqlInsert(key string, value string) bool { - if err := MySQLdatabase.PingContext(ctx); err != nil { +func MysqlInsert(key string, value string, db *sql.DB) bool { + if err := db.PingContext(ctx); err != nil { log.LoggerWContext(ctx).Error("Unable to ping database, reconnect: " + err.Error()) } - _, err := MySQLdatabase.Exec( + _, err := db.Exec( ` INSERT into key_value_storage values(?,?) ON DUPLICATE KEY UPDATE value = VALUES(value) @@ -28,11 +30,11 @@ ON DUPLICATE KEY UPDATE value = VALUES(value) } // MysqlGet function -func MysqlGet(key string) (string, string) { - if err := MySQLdatabase.PingContext(ctx); err != nil { +func MysqlGet(key string, db *sql.DB) (string, string) { + if err := db.PingContext(ctx); err != nil { log.LoggerWContext(ctx).Error("Unable to ping database, reconnect: " + err.Error()) } - rows, err := MySQLdatabase.Query("select id, value from key_value_storage where id = ?", "/dhcpd/"+key) + rows, err := db.Query("select id, value from key_value_storage where id = ?", "/dhcpd/"+key) defer rows.Close() if err != nil { log.LoggerWContext(ctx).Debug("Error while getting MySQL '" + key + "': " + err.Error()) @@ -52,11 +54,11 @@ func MysqlGet(key string) (string, string) { } // MysqlDel function -func MysqlDel(key string) bool { - if err := MySQLdatabase.PingContext(ctx); err != nil { +func MysqlDel(key string, db *sql.DB) bool { + if err := db.PingContext(ctx); err != nil { log.LoggerWContext(ctx).Error("Unable to ping database, reconnect: " + 
err.Error()) } - rows, err := MySQLdatabase.Query("delete from key_value_storage where id = ?", "/dhcpd/"+key) + rows, err := db.Query("delete from key_value_storage where id = ?", "/dhcpd/"+key) defer rows.Close() if err != nil { log.LoggerWContext(ctx).Error("Error while deleting MySQL key '" + key + "': " + err.Error()) diff --git a/go/cmd/pfdhcp/main.go b/go/cmd/pfdhcp/main.go index 171a8831ed56..3e1432f751a4 100644 --- a/go/cmd/pfdhcp/main.go +++ b/go/cmd/pfdhcp/main.go @@ -32,9 +32,6 @@ import ( // DHCPConfig global var var DHCPConfig *Interfaces -// MySQLdatabase global var -var MySQLdatabase *sql.DB - // GlobalIPCache global var var GlobalIPCache *cache.Cache @@ -97,12 +94,12 @@ func main() { // Read DB config configDatabase := pfconfigdriver.GetType[pfconfigdriver.PfConfDatabase](ctx) - connectDB(configDatabase) + MyDB := connectDB(configDatabase) // Keep the db alive - go func() { + go func(*sql.DB) { for { - err := MySQLdatabase.Ping() + err := MyDB.Ping() if err != nil { log.LoggerWContext(ctx).Error("Unable to ping DB: " + err.Error()) } else { @@ -110,12 +107,12 @@ func main() { } time.Sleep(5 * time.Second) } - }() + }(MyDB) VIP = make(map[string]bool) VIPIp = make(map[string]net.IP) - go func() { + go func(*sql.DB) { var DHCPinterfaces pfconfigdriver.DHCPInts pfconfigdriver.FetchDecodeSocket(ctx, &DHCPinterfaces) var interfaces pfconfigdriver.ListenInts @@ -132,11 +129,11 @@ func main() { } for { - DHCPConfig.detectVIP(sharedutils.RemoveDuplicates(append(interfaces.Element, intDhcp...))) + DHCPConfig.detectVIP(sharedutils.RemoveDuplicates(append(interfaces.Element, intDhcp...)), MyDB) time.Sleep(3 * time.Second) } - }() + }(MyDB) go func() { var err error @@ -161,7 +158,7 @@ func main() { // Read pfconfig DHCPConfig = newDHCPConfig() - DHCPConfig.readConfig() + DHCPConfig.readConfig(MyDB) webservices := pfconfigdriver.GetType[pfconfigdriver.PfConfWebservices](ctx) // Queue value @@ -202,21 +199,21 @@ func main() { v.run(ctx, jobs) }() } - + api := 
&API{DB: MyDB} // Api router := mux.NewRouter() - router.HandleFunc("/api/v1/dhcp/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", handleMac2Ip).Methods("GET") - router.HandleFunc("/api/v1/dhcp/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", handleReleaseIP).Methods("DELETE") - router.HandleFunc("/api/v1/dhcp/ip/{ip:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", handleIP2Mac).Methods("GET") - router.HandleFunc("/api/v1/dhcp/stats", handleAllStats).Methods("GET") - router.HandleFunc("/api/v1/dhcp/stats/{int:.*}/{network:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", handleStats).Methods("GET") - router.HandleFunc("/api/v1/dhcp/stats/{int:.*}", handleStats).Methods("GET") - router.HandleFunc("/api/v1/dhcp/debug/{int:.*}/{role:(?:[^/]*)}", handleDebug).Methods("GET") - router.HandleFunc("/api/v1/dhcp/detect_duplicates/{int:.*}", handleDuplicates).Methods("GET") - router.HandleFunc("/api/v1/dhcp/options/network/{network:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", handleOverrideNetworkOptions).Methods("POST") - router.HandleFunc("/api/v1/dhcp/options/network/{network:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", handleRemoveNetworkOptions).Methods("DELETE") - router.HandleFunc("/api/v1/dhcp/options/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", handleOverrideOptions).Methods("POST") - router.HandleFunc("/api/v1/dhcp/options/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", handleRemoveOptions).Methods("DELETE") + router.HandleFunc("/api/v1/dhcp/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", api.handleMac2Ip).Methods("GET") + router.HandleFunc("/api/v1/dhcp/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", api.handleReleaseIP).Methods("DELETE") + router.HandleFunc("/api/v1/dhcp/ip/{ip:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", api.handleIP2Mac).Methods("GET") + router.HandleFunc("/api/v1/dhcp/stats", api.handleAllStats).Methods("GET") + router.HandleFunc("/api/v1/dhcp/stats/{int:.*}/{network:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", api.handleStats).Methods("GET") + router.HandleFunc("/api/v1/dhcp/stats/{int:.*}", 
api.handleStats).Methods("GET") + router.HandleFunc("/api/v1/dhcp/debug/{int:.*}/{role:(?:[^/]*)}", api.handleDebug).Methods("GET") + router.HandleFunc("/api/v1/dhcp/detect_duplicates/{int:.*}", api.handleDuplicates).Methods("GET") + router.HandleFunc("/api/v1/dhcp/options/network/{network:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", api.handleOverrideNetworkOptions).Methods("POST") + router.HandleFunc("/api/v1/dhcp/options/network/{network:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", api.handleRemoveNetworkOptions).Methods("DELETE") + router.HandleFunc("/api/v1/dhcp/options/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", api.handleOverrideOptions).Methods("POST") + router.HandleFunc("/api/v1/dhcp/options/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", api.handleRemoveOptions).Methods("DELETE") http.Handle("/", httpauth.SimpleBasicAuth(webservices.User, webservices.Pass)(router)) srv := &http.Server{ @@ -277,7 +274,7 @@ func (I *Interface) runUnicast(ctx context.Context, jobs chan job) { } // ServeDHCP function is the main function that will deal with the dhcp packet -func (I *Interface) ServeDHCP(ctx context.Context, p dhcp.Packet, msgType dhcp.MessageType, srcIP net.Addr, srvIP net.IP) (answer Answer) { +func (I *Interface) ServeDHCP(ctx context.Context, p dhcp.Packet, msgType dhcp.MessageType, srcIP net.Addr, srvIP net.IP, db *sql.DB) (answer Answer) { var handler DHCPHandler var NetScope net.IPNet @@ -303,7 +300,7 @@ func (I *Interface) ServeDHCP(ctx context.Context, p dhcp.Packet, msgType dhcp.M if x, found := NodeCache.Get(answer.MAC.String()); found { node = x.(NodeInfo) } else { - node = NodeInformation(ctx, answer.MAC) + node = NodeInformation(ctx, answer.MAC, db) NodeCache.Set(answer.MAC.String(), node, 3*time.Second) } @@ -558,7 +555,7 @@ func (I *Interface) ServeDHCP(ctx context.Context, p dhcp.Packet, msgType dhcp.M leaseDuration := handler.leaseDuration // Add network options on the fly - x, err := decodeOptions(NetScope.IP.String()) + x, err := 
decodeOptions(NetScope.IP.String(), db) if err == nil { for key, value := range x { if key == dhcp.OptionIPAddressLeaseTime { @@ -584,7 +581,7 @@ func (I *Interface) ServeDHCP(ctx context.Context, p dhcp.Packet, msgType dhcp.M leaseDuration = 0 } // Add device (mac) options on the fly - x, err = decodeOptions(answer.MAC.String()) + x, err = decodeOptions(answer.MAC.String(), db) if err == nil { for key, value := range x { if key == dhcp.OptionIPAddressLeaseTime { @@ -696,9 +693,9 @@ func (I *Interface) ServeDHCP(ctx context.Context, p dhcp.Packet, msgType dhcp.M GlobalOptions = options leaseDuration := handler.leaseDuration // Add network options - AddDevicesOptions(NetScope.IP.String(), &leaseDuration, GlobalOptions) + AddDevicesOptions(NetScope.IP.String(), &leaseDuration, GlobalOptions, db) // Add device options - AddDevicesOptions(answer.MAC.String(), &leaseDuration, GlobalOptions) + AddDevicesOptions(answer.MAC.String(), &leaseDuration, GlobalOptions, db) info = GetFromGlobalFilterCache(msgType.String(), answer.MAC.String(), Options) // Add options on the fly from pffilter reject := AddPffilterDevicesOptions(info, GlobalOptions) @@ -723,7 +720,7 @@ func (I *Interface) ServeDHCP(ctx context.Context, p dhcp.Packet, msgType dhcp.M // Update Global Caches GlobalIPCache.Set(reqIP.String(), answer.MAC.String(), cacheDuration) GlobalMacCache.Set(answer.MAC.String(), reqIP.String(), cacheDuration) - err := MysqlUpdateIP4Log(answer.MAC.String(), reqIP.String(), cacheDuration) + err := MysqlUpdateIP4Log(answer.MAC.String(), reqIP.String(), cacheDuration, db) if err != nil { log.LoggerWContext(ctx).Info(err.Error()) } diff --git a/go/cmd/pfdhcp/server.go b/go/cmd/pfdhcp/server.go index abdfdb7da0bc..f2e9d949e864 100644 --- a/go/cmd/pfdhcp/server.go +++ b/go/cmd/pfdhcp/server.go @@ -2,6 +2,7 @@ package main import ( "context" + "database/sql" "net" dhcp "github.com/inverse-inc/dhcp4" @@ -19,7 +20,7 @@ type Answer struct { // Handler interface type Handler interface { - 
ServeDHCP(ctx context.Context, req dhcp.Packet, msgType dhcp.MessageType, srcIP net.Addr, srvIP net.IP) Answer + ServeDHCP(ctx context.Context, req dhcp.Packet, msgType dhcp.MessageType, srcIP net.Addr, srvIP net.IP, db *sql.DB) Answer } // ServeConn is the bare minimum connection functions required by Serve() diff --git a/go/cmd/pfdhcp/utils.go b/go/cmd/pfdhcp/utils.go index a21435099fc0..be87933e766f 100644 --- a/go/cmd/pfdhcp/utils.go +++ b/go/cmd/pfdhcp/utils.go @@ -2,6 +2,7 @@ package main import ( "context" + "database/sql" "encoding/binary" "fmt" "math/rand" @@ -32,14 +33,14 @@ type NodeInfo struct { } // connectDB connect to the database -func connectDB(configDatabase *pfconfigdriver.PfConfDatabase) { +func connectDB(configDatabase *pfconfigdriver.PfConfDatabase) *sql.DB { db, err := db.DbFromConfig(ctx) sharedutils.CheckError(err) - MySQLdatabase = db + return db } // initiaLease fetch the database to remove already assigned ip addresses -func initiaLease(dhcpHandler *DHCPHandler, ConfNet pfconfigdriver.RessourseNetworkConf) { +func initiaLease(dhcpHandler *DHCPHandler, ConfNet pfconfigdriver.RessourseNetworkConf, db *sql.DB) { // Need to calculate the end ip because of the ip per role feature now := time.Now() endip := binary.BigEndian.Uint32(dhcpHandler.start.To4()) + uint32(dhcpHandler.leaseRange) - uint32(1) @@ -47,7 +48,7 @@ func initiaLease(dhcpHandler *DHCPHandler, ConfNet pfconfigdriver.RessourseNetwo binary.BigEndian.PutUint32(a, endip) ipend := net.IPv4(a[0], a[1], a[2], a[3]) - rows, err := MySQLdatabase.Query("select ip,mac,end_time,start_time from ip4log i where inet_aton(ip) between inet_aton(?) and inet_aton(?) and (end_time = '"+ZeroDate+"' OR end_time > NOW()) and end_time in (select MAX(end_time) from ip4log where mac = i.mac) ORDER BY mac,end_time desc", dhcpHandler.start.String(), ipend.String()) + rows, err := db.Query("select ip,mac,end_time,start_time from ip4log i where inet_aton(ip) between inet_aton(?) and inet_aton(?) 
and (end_time = '"+ZeroDate+"' OR end_time > NOW()) and end_time in (select MAX(end_time) from ip4log where mac = i.mac) ORDER BY mac,end_time desc", dhcpHandler.start.String(), ipend.String()) if err != nil { log.LoggerWContext(ctx).Error(err.Error()) return @@ -121,7 +122,7 @@ func InterfaceScopeFromMac(MAC string) string { } // Detect the vip on each interfaces -func (d *Interfaces) detectVIP(interfaces []string) { +func (d *Interfaces) detectVIP(interfaces []string, db *sql.DB) { var keyConfCluster pfconfigdriver.NetInterface keyConfCluster.PfconfigNS = "config::Pf(CLUSTER," + pfconfigdriver.FindClusterName(ctx) + ")" @@ -151,7 +152,7 @@ func (d *Interfaces) detectVIP(interfaces []string) { if VIP[v] == false { log.LoggerWContext(ctx).Info(v + " got the VIP") if h, ok := intNametoInterface[v]; ok { - go h.handleAPIReq(APIReq{Req: "initialease", NetInterface: v, NetWork: ""}) + go h.handleAPIReq(APIReq{Req: "initialease", NetInterface: v, NetWork: ""}, db) } VIP[v] = true } @@ -164,9 +165,9 @@ func (d *Interfaces) detectVIP(interfaces []string) { } // NodeInformation return the node information -func NodeInformation(ctx context.Context, target net.HardwareAddr) (r NodeInfo) { +func NodeInformation(ctx context.Context, target net.HardwareAddr, db *sql.DB) (r NodeInfo) { - rows, err := MySQLdatabase.Query("SELECT mac, status, IF(ISNULL(nc.name), '', nc.name) as category FROM node LEFT JOIN node_category as nc on node.category_id = nc.category_id WHERE mac = ?", target.String()) + rows, err := db.Query("SELECT mac, status, IF(ISNULL(nc.name), '', nc.name) as category FROM node LEFT JOIN node_category as nc on node.category_id = nc.category_id WHERE mac = ?", target.String()) defer rows.Close() if err != nil { @@ -361,8 +362,8 @@ func AssignIP(dhcpHandler *DHCPHandler, ipRange string) (map[string]uint32, []ne } // AddDevicesOptions function add options on the fly -func AddDevicesOptions(object string, leaseDuration *time.Duration, GlobalOptions 
map[dhcp.OptionCode][]byte) { - x, err := decodeOptions(object) +func AddDevicesOptions(object string, leaseDuration *time.Duration, GlobalOptions map[dhcp.OptionCode][]byte, db *sql.DB) { + x, err := decodeOptions(object, db) if err == nil { for key, value := range x { if key == dhcp.OptionIPAddressLeaseTime { @@ -433,30 +434,30 @@ func IsIPv6(address net.IP) bool { } // MysqlUpdateIP4Log update the ip4log table -func MysqlUpdateIP4Log(mac string, ip string, duration time.Duration) error { - if err := MySQLdatabase.PingContext(ctx); err != nil { +func MysqlUpdateIP4Log(mac string, ip string, duration time.Duration, db *sql.DB) error { + if err := db.PingContext(ctx); err != nil { log.LoggerWContext(ctx).Error("Unable to ping database, reconnect: " + err.Error()) } - MAC2IP, err := MySQLdatabase.Prepare("SELECT ip FROM ip4log WHERE mac = ? AND (end_time = \"" + ZeroDate + "\" OR ( end_time + INTERVAL 30 SECOND ) > NOW()) ORDER BY start_time DESC LIMIT 1") + MAC2IP, err := db.Prepare("SELECT ip FROM ip4log WHERE mac = ? AND (end_time = \"" + ZeroDate + "\" OR ( end_time + INTERVAL 30 SECOND ) > NOW()) ORDER BY start_time DESC LIMIT 1") if err != nil { return err } defer MAC2IP.Close() - IP2MAC, err := MySQLdatabase.Prepare("SELECT mac FROM ip4log WHERE ip = ? AND (end_time = \"" + ZeroDate + "\" OR end_time > NOW()) ORDER BY start_time DESC") + IP2MAC, err := db.Prepare("SELECT mac FROM ip4log WHERE ip = ? AND (end_time = \"" + ZeroDate + "\" OR end_time > NOW()) ORDER BY start_time DESC") if err != nil { return err } defer IP2MAC.Close() - IPClose, err := MySQLdatabase.Prepare(" UPDATE ip4log SET end_time = NOW() WHERE ip = ?") + IPClose, err := db.Prepare(" UPDATE ip4log SET end_time = NOW() WHERE ip = ?") if err != nil { return err } - defer IP2MAC.Close() + defer IPClose.Close() - IPInsert, err := MySQLdatabase.Prepare("INSERT INTO ip4log (mac, ip, start_time, end_time) VALUES (?, ?, NOW(), DATE_ADD(NOW(), INTERVAL ? 
SECOND)) ON DUPLICATE KEY UPDATE mac=VALUES(mac), start_time=NOW(), end_time=VALUES(end_time)") + IPInsert, err := db.Prepare("INSERT INTO ip4log (mac, ip, start_time, end_time) VALUES (?, ?, NOW(), DATE_ADD(NOW(), INTERVAL ? SECOND)) ON DUPLICATE KEY UPDATE mac=VALUES(mac), start_time=NOW(), end_time=VALUES(end_time)") if err != nil { return err } diff --git a/go/cmd/pfdhcp/workers_pool.go b/go/cmd/pfdhcp/workers_pool.go index 11d5274f6b06..3e8075c54484 100644 --- a/go/cmd/pfdhcp/workers_pool.go +++ b/go/cmd/pfdhcp/workers_pool.go @@ -2,6 +2,7 @@ package main import ( "context" + "database/sql" _ "expvar" "net" "strconv" @@ -18,11 +19,12 @@ type job struct { clientAddr net.Addr //remote client ip srvAddr net.IP localCtx context.Context + db *sql.DB } func doWork(id int, element job) { var ans Answer - if ans = element.handler.ServeDHCP(element.localCtx, element.DHCPpacket, element.msgType, element.clientAddr, element.srvAddr); ans.D != nil { + if ans = element.handler.ServeDHCP(element.localCtx, element.DHCPpacket, element.msgType, element.clientAddr, element.srvAddr, element.db); ans.D != nil { ipStr, portStr, _ := net.SplitHostPort(element.clientAddr.String()) ctx = log.AddToLogContext(ctx, "mac", ans.MAC.String()) log.LoggerWContext(ctx).Debug("Giaddr " + element.DHCPpacket.GIAddr().String()) From 11d0a7424091a8eddd8d000499184e0168282861 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Tue, 3 Dec 2024 22:20:08 +0000 Subject: [PATCH 149/176] update NEWS --- NEWS.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index 2abe4ddaa501..d35ffae3aef8 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -43,6 +43,10 @@ For a list of compatibility related changes see the < Date: Wed, 4 Dec 2024 06:42:47 -0500 Subject: [PATCH 150/176] fix dependency related errors --- containers/ntlm-auth-api/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/containers/ntlm-auth-api/Dockerfile 
b/containers/ntlm-auth-api/Dockerfile index 5509152c9bda..35d824ef3e02 100644 --- a/containers/ntlm-auth-api/Dockerfile +++ b/containers/ntlm-auth-api/Dockerfile @@ -7,7 +7,7 @@ WORKDIR /usr/local/pf/ COPY bin bin RUN apt-get -qq update && \ - apt-get -yqq install python3-pip python3-pymysql python3-sdnotify python3-tz python3-dev gunicorn3 python3-psutil + apt-get -yqq install python3-pip python3-pymysql python3-sdnotify python3-tz python3-dev gunicorn3 python3-psutil python3-redis RUN VER=`python3 -c 'import sys; val=sys.version_info;print(str(val.major)+"."+str(val.minor))'` ; \ sudo rm -rf /usr/lib/python$VER/EXTERNALLY-MANAGED && \ @@ -15,4 +15,6 @@ RUN VER=`python3 -c 'import sys; val=sys.version_info;print(str(val.major)+"."+s COPY addons/ntlm-auth-api/openssl.cnf /usr/lib/ssl/openssl.cnf +WORKDIR /usr/local/pf/bin/pyntlm_auth + ENTRYPOINT /usr/bin/gunicorn -c /usr/local/pf/bin/pyntlm_auth/gunicorn.conf.py From 5bc531eae3ea05ef41426e0b01cc10e60531c063 Mon Sep 17 00:00:00 2001 From: Durand Date: Wed, 4 Dec 2024 08:05:46 -0500 Subject: [PATCH 151/176] Feature/track tls node (#8416) * Added node_tls table * Fixed syntax * Insert in node_tls from radius_audit_log * INSERT or UPDATE node_tls table if there is a radius EAP-TLS request * Removed escaped string before insert in DB * Match the unreg date of the node with the certificate provided * Parse request to convert attributes format * Typo * Fixed sql upgrade script --- db/pf-schema-X.Y.sql | 27 ++ db/upgrade-X.X-X.Y.sql | 24 ++ go/cron/flush_radius_audit_log_job.go | 292 ++++++++++++------ go/plugin/caddy2/pfpki/models/models.go | 6 +- .../DynamicRouting/Module/TLSEnrollment.pm | 2 +- .../_components/FormTypePacketfencePki.vue | 9 + .../pkiProviders/_components/index.js | 1 + lib/pf/pki_provider.pm | 2 + lib/pf/pki_provider/packetfence_pki.pm | 27 +- 9 files changed, 283 insertions(+), 107 deletions(-) diff --git a/db/pf-schema-X.Y.sql b/db/pf-schema-X.Y.sql index 83e8cb019c38..ee693b90dd9c 100644 --- 
a/db/pf-schema-X.Y.sql +++ b/db/pf-schema-X.Y.sql @@ -1619,6 +1619,33 @@ CREATE FUNCTION ROUND_TO_MONTH (d DATETIME) RETURNS DATETIME DETERMINISTIC RETURN DATE_ADD(DATE(d),interval -DAY(d)+1 DAY); + +-- +-- Create table node_tls +-- + +CREATE TABLE node_tls ( + `mac` varchar(17) NOT NULL PRIMARY KEY, + `TLSCertSerial` varchar(255) default NULL, + `TLSCertExpiration` varchar(255) default NULL, + `TLSCertValidSince` varchar(255) default NULL, + `TLSCertSubject` varchar(255) default NULL, + `TLSCertIssuer` varchar(255) default NULL, + `TLSCertCommonName` varchar(255) default NULL, + `TLSCertSubjectAltNameEmail` varchar(255) default NULL, + `TLSClientCertSerial` varchar(255) default NULL, + `TLSClientCertExpiration` varchar(255) default NULL, + `TLSClientCertValidSince` varchar(255) default NULL, + `TLSClientCertSubject` varchar(255) default NULL, + `TLSClientCertIssuer` varchar(255) default NULL, + `TLSClientCertCommonName` varchar(255) default NULL, + `TLSClientCertSubjectAltNameEmail` varchar(255) default NULL, + `TLSClientCertX509v3ExtendedKeyUsage` varchar(255) default NULL, + `TLSClientCertX509v3SubjectKeyIdentifier` varchar(255) default NULL, + `TLSClientCertX509v3AuthorityKeyIdentifier` varchar(255) default NULL, + `TLSClientCertX509v3ExtendedKeyUsageOID` varchar(255) default NULL +) ENGINE=InnoDB DEFAULT CHARACTER SET = 'utf8mb4' COLLATE = 'utf8mb4_general_ci'; + -- -- Updating to current version -- diff --git a/db/upgrade-X.X-X.Y.sql b/db/upgrade-X.X-X.Y.sql index 69582502bfaa..1f1f57f6bed9 100644 --- a/db/upgrade-X.X-X.Y.sql +++ b/db/upgrade-X.X-X.Y.sql @@ -70,6 +70,30 @@ ALTER TABLE radius_audit_log MODIFY created_at TIMESTAMP NOT NULL DEFAULT CURREN \! echo "Make psk unique"; ALTER TABLE person ADD CONSTRAINT UNIQUE person_psk (`psk`); +\! 
echo "Create table node_tls" + +CREATE TABLE IF NOT EXISTS node_tls ( + `mac` varchar(17) NOT NULL PRIMARY KEY, + `TLSCertSerial` varchar(255) default NULL, + `TLSCertExpiration` varchar(255) default NULL, + `TLSCertValidSince` varchar(255) default NULL, + `TLSCertSubject` varchar(255) default NULL, + `TLSCertIssuer` varchar(255) default NULL, + `TLSCertCommonName` varchar(255) default NULL, + `TLSCertSubjectAltNameEmail` varchar(255) default NULL, + `TLSClientCertSerial` varchar(255) default NULL, + `TLSClientCertExpiration` varchar(255) default NULL, + `TLSClientCertValidSince` varchar(255) default NULL, + `TLSClientCertSubject` varchar(255) default NULL, + `TLSClientCertIssuer` varchar(255) default NULL, + `TLSClientCertCommonName` varchar(255) default NULL, + `TLSClientCertSubjectAltNameEmail` varchar(255) default NULL, + `TLSClientCertX509v3ExtendedKeyUsage` varchar(255) default NULL, + `TLSClientCertX509v3SubjectKeyIdentifier` varchar(255) default NULL, + `TLSClientCertX509v3AuthorityKeyIdentifier` varchar(255) default NULL, + `TLSClientCertX509v3ExtendedKeyUsageOID` varchar(255) default NULL +) ENGINE=InnoDB DEFAULT CHARACTER SET = 'utf8mb4' COLLATE = 'utf8mb4_general_ci'; + \! 
echo "Incrementing PacketFence schema version..."; INSERT IGNORE INTO pf_version (id, version, created_at) VALUES (@VERSION_INT, CONCAT_WS('.', @MAJOR_VERSION, @MINOR_VERSION), NOW()); diff --git a/go/cron/flush_radius_audit_log_job.go b/go/cron/flush_radius_audit_log_job.go index f4828cacf075..df15d1355b7e 100644 --- a/go/cron/flush_radius_audit_log_job.go +++ b/go/cron/flush_radius_audit_log_job.go @@ -71,11 +71,11 @@ func (j *FlushRadiusAuditLogJob) Run() { log.LogError(ctx, fmt.Sprintf("%s error running: %s", j.Name(), err.Error())) continue } - jsonStr = string(s) } - + jsonStr = strings.Replace(jsonStr, "\\", "", -1) err := json.Unmarshal([]byte(jsonStr), &entry) + if err != nil { log.LogError(ctx, fmt.Sprintf("%s error running: %s", j.Name(), err.Error())) continue @@ -121,11 +121,33 @@ func (j *FlushRadiusAuditLogJob) flushLogs(entries [][]interface{}) error { return err } + // REPLACE in node_tls + sqlTLS, argsTLS, err := j.buildQueryTLS(entries) + if err != nil { + return err + } + + res, err = db.ExecContext( + ctx, + sqlTLS, + argsTLS..., + ) + + if err != nil { + return err + } + + _, err = res.RowsAffected() + if err != nil { + return err + } + log.LogInfo(ctx, fmt.Sprintf("Flushed %d radius_audit_log", rows)) return nil } const RADIUS_AUDIT_LOG_COLUMN_COUNT = 37 +const NODE_TLS_COLUMN_COUNT = 19 /* query = "INSERT INTO radius_audit_log \ @@ -232,6 +254,79 @@ func (j *FlushRadiusAuditLogJob) argsFromEntry(entry []interface{}) []interface{ return args } +func (j *FlushRadiusAuditLogJob) buildQueryTLS(entries [][]interface{}) (string, []interface{}, error) { + sql := ` +INSERT INTO node_tls + ( + mac, TLSCertSerial, TLSCertExpiration, TLSCertValidSince, + TLSCertSubject, TLSCertIssuer, TLSCertCommonName, + TLSCertSubjectAltNameEmail, TLSClientCertSerial, + TLSClientCertExpiration, TLSClientCertValidSince, + TLSClientCertSubject, TLSClientCertIssuer, + TLSClientCertCommonName, TLSClientCertSubjectAltNameEmail, + TLSClientCertX509v3ExtendedKeyUsage, + 
TLSClientCertX509v3SubjectKeyIdentifier, + TLSClientCertX509v3AuthorityKeyIdentifier, + TLSClientCertX509v3ExtendedKeyUsageOID + ) +VALUES ` + bind := "( ?" + strings.Repeat(",?", NODE_TLS_COLUMN_COUNT-1) + ")" + sql += bind + strings.Repeat(","+bind, len(entries)-1) + sql += ` + ON DUPLICATE KEY UPDATE TLSCertSerial = VALUES(TLSCertSerial), + TLSCertExpiration = VALUES(TLSCertExpiration), TLSCertValidSince = VALUES(TLSCertValidSince), + TLSCertSubject = VALUES(TLSCertSubject), TLSCertIssuer = VALUES(TLSCertIssuer), TLSCertCommonName = VALUES( TLSCertCommonName), + TLSCertSubjectAltNameEmail = VALUES(TLSCertSubjectAltNameEmail), TLSClientCertSerial = VALUES(TLSClientCertSerial), + TLSClientCertExpiration = VALUES(TLSClientCertExpiration), TLSClientCertValidSince = VALUES(TLSClientCertValidSince), + TLSClientCertSubject = VALUES(TLSClientCertSubject), TLSClientCertIssuer = VALUES(TLSClientCertIssuer), + TLSClientCertCommonName = VALUES(TLSClientCertCommonName), TLSClientCertSubjectAltNameEmail = VALUES(TLSClientCertSubjectAltNameEmail), + TLSClientCertX509v3ExtendedKeyUsage = VALUES(TLSClientCertX509v3ExtendedKeyUsage), + TLSClientCertX509v3SubjectKeyIdentifier = VALUES(TLSClientCertX509v3SubjectKeyIdentifier), + TLSClientCertX509v3AuthorityKeyIdentifier = VALUES(TLSClientCertX509v3AuthorityKeyIdentifier), + TLSClientCertX509v3ExtendedKeyUsageOID = VALUES(TLSClientCertX509v3ExtendedKeyUsageOID) + + ` + args := make([]interface{}, 0, NODE_TLS_COLUMN_COUNT) + for _, e := range entries { + if keyExists(e[1].(map[string]interface{}), "Calling-Station-Id") && keyExists(e[1].(map[string]interface{}), "TLS-Client-Cert-Common-Name") { + args = append(args, j.argsFromEntryForTLS(e)...) 
+ } + } + return sql, args, nil +} + +func keyExists(myMap map[string]interface{}, key string) bool { + _, exists := myMap[key] + return exists +} + +func (j *FlushRadiusAuditLogJob) argsFromEntryForTLS(entry []interface{}) []interface{} { + args := make([]interface{}, NODE_TLS_COLUMN_COUNT) + var request map[string]interface{} + request = entry[1].(map[string]interface{}) + request = parseRequestArgs(request) + args[0] = formatRequestValue(request["Calling-Station-Id"], "") + args[1] = formatRequestValue(request["TLS-Cert-Serial"], "N/A") + args[2] = formatRequestValue(request["TLS-Cert-Expiration"], "N/A") + args[3] = formatRequestValue(request["TLS-Cert-Valid-Since"], "N/A") + args[4] = formatRequestValue(request["TLS-Cert-Subject"], "N/A") + args[5] = formatRequestValue(request["TLS-Cert-Issuer"], "N/A") + args[6] = formatRequestValue(request["TLS-Cert-Common-Name"], "N/A") + args[7] = formatRequestValue(request["TLS-Cert-Subject-Alt-Name-Email"], "N/A") + args[8] = formatRequestValue(request["TLS-Client-Cert-Serial"], "N/A") + args[9] = formatRequestValue(request["TLS-Client-Cert-Expiration"], "N/A") + args[10] = formatRequestValue(request["TLS-Client-Cert-Valid-Since"], "N/A") + args[11] = formatRequestValue(request["TLS-Client-Cert-Subject"], "N/A") + args[12] = formatRequestValue(request["TLS-Client-Cert-Issuer"], "N/A") + args[13] = formatRequestValue(request["TLS-Client-Cert-Common-Name"], "N/A") + args[14] = formatRequestValue(request["TLS-Client-Cert-Subject-Alt-Name-Email"], "N/A") + args[15] = formatRequestValue(request["TLS-Client-Cert-X509v3-Extended-Key-Usage"], "N/A") + args[16] = formatRequestValue(request["TLS-Client-Cert-X509v3-Subject-Key-Identifier"], "N/A") + args[17] = formatRequestValue(request["TLS-Client-Cert-X509v3-Authority-Key-Identifier"], "N/A") + args[18] = formatRequestValue(request["TLS-Client-Cert-X509v3-Extended-Key-Usage-OID"], "N/A") + return args +} + func formatRequest(request map[string]interface{}) string { parts := 
[]string{} keys := util.MapKeys(request) @@ -312,7 +407,6 @@ func escapeRadiusRequest(s string) string { if size == len(s) { return s } - out := make([]byte, size) j := 0 for _, c := range []byte(s) { @@ -370,114 +464,114 @@ func parseRequestArgs(request map[string]interface{}) map[string]interface{} { type AKMSuite int const ( - AKMReserved AKMSuite = iota // 0 - Reserved - IEEE8021X // 1 - 802.1X - PSK // 2 - PSK - FT_8021X // 3 - FT over 802.1X - FT_PSK // 4 - FT over PSK - WPA_8021X // 5 - WPA with 802.1X - WPA_PSK // 6 - WPA with PSK - OWE // 7 - OWE - OWE_Transition // 8 - OWE Transition Mode - SAE // 9 - Simultaneous Authentication of Equals - FT_SAE // 10 - FT over SAE - FILS_SHA256 // 11 - FILS-SHA256 - FILS_SHA384 // 12 - FILS-SHA384 - FT_FILS_SHA256 // 13 - FT over FILS-SHA256 - FT_FILS_SHA384 // 14 - FT over FILS-SHA384 - OWE_transition_mode // 15 - OWE transition mode + AKMReserved AKMSuite = iota // 0 - Reserved + IEEE8021X // 1 - 802.1X + PSK // 2 - PSK + FT_8021X // 3 - FT over 802.1X + FT_PSK // 4 - FT over PSK + WPA_8021X // 5 - WPA with 802.1X + WPA_PSK // 6 - WPA with PSK + OWE // 7 - OWE + OWE_Transition // 8 - OWE Transition Mode + SAE // 9 - Simultaneous Authentication of Equals + FT_SAE // 10 - FT over SAE + FILS_SHA256 // 11 - FILS-SHA256 + FILS_SHA384 // 12 - FILS-SHA384 + FT_FILS_SHA256 // 13 - FT over FILS-SHA256 + FT_FILS_SHA384 // 14 - FT over FILS-SHA384 + OWE_transition_mode // 15 - OWE transition mode ) type CipherSuite int const ( - CipherReserved CipherSuite = iota // 0 - Reserved - WEP40 // 1 - WEP-40 - TKIP // 2 - TKIP - CipherReserved3 // 3 - Reserved - CCMP128 // 4 - CCMP-128 - WEP104 // 5 - WEP-104 - BIPCMAC128 // 6 - BIP-CMAC-128 - GCMP128 // 7 - GCMP-128 - GCMP256 // 8 - GCMP-256 - CCMP256 // 9 - CCMP-256 - BIPGMAC128 // 10 - BIP-GMAC-128 - BIPGMAC256 // 11 - BIP-GMAC-256 - SMS4 // 12 - SMS4 - CKIP128 // 13 - CKIP-128 - CKIP128_PMK // 14 - CKIP-128 with PMK caching - CipherReserved15 // 15 - Reserved + CipherReserved 
CipherSuite = iota // 0 - Reserved + WEP40 // 1 - WEP-40 + TKIP // 2 - TKIP + CipherReserved3 // 3 - Reserved + CCMP128 // 4 - CCMP-128 + WEP104 // 5 - WEP-104 + BIPCMAC128 // 6 - BIP-CMAC-128 + GCMP128 // 7 - GCMP-128 + GCMP256 // 8 - GCMP-256 + CCMP256 // 9 - CCMP-256 + BIPGMAC128 // 10 - BIP-GMAC-128 + BIPGMAC256 // 11 - BIP-GMAC-256 + SMS4 // 12 - SMS4 + CKIP128 // 13 - CKIP-128 + CKIP128_PMK // 14 - CKIP-128 with PMK caching + CipherReserved15 // 15 - Reserved ) -func(c CipherSuite) String() string { +func (c CipherSuite) String() string { switch c { - case WEP40: - return "WEP-40" - case TKIP: - return "TKIP" - case CCMP128: - return "CCMP-128" - case WEP104: - return "WEP-104" - case GCMP128: - return "GCMP-128" - case GCMP256: - return "GCMP-256" - case CCMP256: - return "CCMP-256" - case BIPCMAC128: - return "BIP-CMAC-128" - case BIPGMAC128: - return "BIP-GMAC-128" - case BIPGMAC256: - return "BIP-GMAC-256" - case SMS4: - return "SMS4" - case CKIP128: - return "CKIP-128" - case CKIP128_PMK: - return "CKIP-128 with PMK caching" - case CipherReserved3, CipherReserved15: - return "Reserved" - default: - return fmt.Sprintf("Unknown cipher suite (Value: %d)", c) + case WEP40: + return "WEP-40" + case TKIP: + return "TKIP" + case CCMP128: + return "CCMP-128" + case WEP104: + return "WEP-104" + case GCMP128: + return "GCMP-128" + case GCMP256: + return "GCMP-256" + case CCMP256: + return "CCMP-256" + case BIPCMAC128: + return "BIP-CMAC-128" + case BIPGMAC128: + return "BIP-GMAC-128" + case BIPGMAC256: + return "BIP-GMAC-256" + case SMS4: + return "SMS4" + case CKIP128: + return "CKIP-128" + case CKIP128_PMK: + return "CKIP-128 with PMK caching" + case CipherReserved3, CipherReserved15: + return "Reserved" + default: + return fmt.Sprintf("Unknown cipher suite (Value: %d)", c) } } -func(a AKMSuite) String() string { +func (a AKMSuite) String() string { switch a { - case IEEE8021X: - return "802.1X" - case PSK: - return "PSK" - case FT_8021X: - return "FT over 
802.1X" - case FT_PSK: - return "FT over PSK" - case WPA_8021X: - return "WPA with 802.1X" - case WPA_PSK: - return "WPA with PSK" - case OWE: - return "OWE" - case OWE_Transition: - return "OWE Transition Mode" - case SAE: - return "SAE" - case FT_SAE: - return "FT over SAE" - case FILS_SHA256: - return "FILS-SHA256" - case FILS_SHA384: - return "FILS-SHA384" - case FT_FILS_SHA256: - return "FT over FILS-SHA256" - case FT_FILS_SHA384: - return "FT over FILS-SHA384" - case OWE_transition_mode: - return "OWE transition mode" - default: - return fmt.Sprintf("Unknown or Reserved AKM suite (Value: %d)", a) + case IEEE8021X: + return "802.1X" + case PSK: + return "PSK" + case FT_8021X: + return "FT over 802.1X" + case FT_PSK: + return "FT over PSK" + case WPA_8021X: + return "WPA with 802.1X" + case WPA_PSK: + return "WPA with PSK" + case OWE: + return "OWE" + case OWE_Transition: + return "OWE Transition Mode" + case SAE: + return "SAE" + case FT_SAE: + return "FT over SAE" + case FILS_SHA256: + return "FILS-SHA256" + case FILS_SHA384: + return "FILS-SHA384" + case FT_FILS_SHA256: + return "FT over FILS-SHA256" + case FT_FILS_SHA384: + return "FT over FILS-SHA384" + case OWE_transition_mode: + return "OWE transition mode" + default: + return fmt.Sprintf("Unknown or Reserved AKM suite (Value: %d)", a) } } diff --git a/go/plugin/caddy2/pfpki/models/models.go b/go/plugin/caddy2/pfpki/models/models.go index cfe0b350d842..c1048cb75995 100644 --- a/go/plugin/caddy2/pfpki/models/models.go +++ b/go/plugin/caddy2/pfpki/models/models.go @@ -1243,7 +1243,10 @@ func (c Cert) New() (types.Info, error) { Subject := c.MakeSubject() - NotAfter := time.Now().AddDate(0, 0, prof.Validity) + NotAfter := c.ValidUntil + if c.ValidUntil.IsZero() { + NotAfter = time.Now().AddDate(0, 0, prof.Validity) + } // Prepare certificate cert := &x509.Certificate{ @@ -1268,6 +1271,7 @@ func (c Cert) New() (types.Info, error) { if len(c.Mail) > 0 { Email = c.Mail } + if len(Email) > 0 { for _, mail := 
range strings.Split(Email, ",") { cert.EmailAddresses = append(cert.EmailAddresses, mail) diff --git a/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/TLSEnrollment.pm b/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/TLSEnrollment.pm index a0a605271e57..e006d6435015 100644 --- a/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/TLSEnrollment.pm +++ b/html/captive-portal/lib/captiveportal/PacketFence/DynamicRouting/Module/TLSEnrollment.pm @@ -195,7 +195,7 @@ sub get_bundle { my $mac = $self->current_mac; my $user_cache = $self->app->user_cache; my $pki_session = $user_cache->compute("pki_session", sub {}); - my $cert_content = $pki_provider->get_bundle({ certificate_email => $pki_session->{certificate_email}, certificate_cn => $pki_session->{certificate_cn}, certificate_pwd => $pki_session->{certificate_pwd} }); + my $cert_content = $pki_provider->get_bundle({ certificate_email => $pki_session->{certificate_email}, certificate_cn => $pki_session->{certificate_cn}, certificate_pwd => $pki_session->{certificate_pwd}, unregdate => $self->new_node_info->{'unregdate'}}); get_logger->debug(sub { "cert_content from pki service $cert_content" }); unless(defined($cert_content)){ diff --git a/html/pfappserver/root/src/views/Configuration/pkiProviders/_components/FormTypePacketfencePki.vue b/html/pfappserver/root/src/views/Configuration/pkiProviders/_components/FormTypePacketfencePki.vue index 7ad0062df9ec..c255e49eb65f 100644 --- a/html/pfappserver/root/src/views/Configuration/pkiProviders/_components/FormTypePacketfencePki.vue +++ b/html/pfappserver/root/src/views/Configuration/pkiProviders/_components/FormTypePacketfencePki.vue @@ -62,6 +62,13 @@ disabled-value="N" /> + + (is => 'rw', default => '%s'); has revoke_on_unregistration => (is => 'rw', default => 'N'); +has certificate_validity_time_from_unreg_date => (is => 'rw', default => 'N'); + =head2 country What country to use for the certificate diff 
--git a/lib/pf/pki_provider/packetfence_pki.pm b/lib/pf/pki_provider/packetfence_pki.pm index 211cfb444a9a..1839dd304f9f 100644 --- a/lib/pf/pki_provider/packetfence_pki.pm +++ b/lib/pf/pki_provider/packetfence_pki.pm @@ -20,6 +20,9 @@ use URI::Escape::XS qw(uri_escape uri_unescape); use pf::api::unifiedapiclient; use pf::dal::key_value_storage; use pf::error qw(is_success is_error); +use DateTime::TimeZone; +use DateTime::Format::Strptime; +use pf::util qw(isenabled); extends 'pf::pki_provider'; @@ -55,11 +58,8 @@ sub get_bundle { my $street = $self->streetaddress; my $postalcode = $self->postalcode; my $streetaddress = $self->streetaddress; - - my $certpwd = $args->{'certificate_pwd'}; - - my $value = eval { - my $return = pf::api::unifiedapiclient->default_client->call("POST", "/api/v1/pki/certs", { + my $expiration = $args->{'unregdate'}; + my $payload = { "cn" => $cn, "mail" => $email, "organisation" => $organisation, @@ -69,7 +69,22 @@ sub get_bundle { "postal_code" => $postalcode, "street_address" => $streetaddress, "profile_id" => $profile, - }); + }; + + if (defined($expiration) && $expiration ne "" && isenabled($self->certificate_validity_time_from_unreg_date) ) { + my $tz = $ENV{TZ} || DateTime::TimeZone->new( name => 'local' )->name(); + my $formatter = DateTime::Format::Strptime->new(pattern => "%F %T",time_zone=>$tz); + my $dt_obj = $formatter->parse_datetime($expiration); + # to convert to a different zone: + $dt_obj->set_time_zone('UTC'); + my $dt = $dt_obj->strftime("%Y-%m-%dT%T%z"); + $dt =~ s/(.*)\+(\d{2})(\d{2})/$1Z/g; + $payload->{"valid_until"} = "$dt"; + } + my $certpwd = $args->{'certificate_pwd'}; + + my $value = eval { + my $return = pf::api::unifiedapiclient->default_client->call("POST", "/api/v1/pki/certs", $payload); }; if ($@) { $logger->warn("Certificate creation failed"); From 17be61f329fa8f39cbda557baf4690fde5bb659f Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Wed, 4 Dec 2024 13:06:13 +0000 Subject: [PATCH 152/176] update 
NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index d35ffae3aef8..bf12983b88a4 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -47,6 +47,7 @@ For a list of compatibility related changes see the < Date: Wed, 4 Dec 2024 08:57:41 -0500 Subject: [PATCH 153/176] Fixed module naming from Cisco::WLC to Cisco::Cisco_WLC_AireOS --- conf/pfsetacls/acl.cfg | 2 +- conf/switch_filters.conf.example | 4 ++-- conf/switches.conf.example | 2 +- lib/pf/Switch.pm | 2 +- lib/pf/Switch/Cisco/Aironet_WDS.pm | 7 ++++--- lib/pf/Switch/Meraki/MR_v2.pm | 2 +- lib/pf/Switch/MockedSwitch.pm | 2 +- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/conf/pfsetacls/acl.cfg b/conf/pfsetacls/acl.cfg index 534c0b94136a..85da3ab22983 100644 --- a/conf/pfsetacls/acl.cfg +++ b/conf/pfsetacls/acl.cfg @@ -1,5 +1,5 @@ [% FOREACH key IN acls.keys %] -[% IF type.match('Cisco::WLC') %]acl delete [% key %][% ELSIF type.match('Cisco::ASA') or type.match('Cisco::\w+') %]Extended IP access list [% key %][% ELSIF type.match('Aruba::CX') %]config +[% IF type.match('Cisco::Cisco_WLC_AireOS') %]acl delete [% key %][% ELSIF type.match('Cisco::ASA') or type.match('Cisco::\w+') %]Extended IP access list [% key %][% ELSIF type.match('Aruba::CX') %]config no access-list ip [% key %] access-list ip [% key %][% END %] [% acls.$key %][% IF type.match('Aruba::CX') %]exit diff --git a/conf/switch_filters.conf.example b/conf/switch_filters.conf.example index 50e5ffc589f8..709a2e99a4f4 100644 --- a/conf/switch_filters.conf.example +++ b/conf/switch_filters.conf.example @@ -248,7 +248,7 @@ #scope = external_portal #switch = Fortinet::FortiGate # -# - If the device is authenticating on a wireless connection, then use the Cisco::WLC_5500 module +# - If the device is authenticating on a wireless connection, then use the Cisco::Cisco_WLC_AireOS module # #[wireless_radius] #filter = radius_request.NAS-Port-Type @@ -262,4 +262,4 @@ # 
#[msmodule4wired:wireless_locationlog|wireless_radius] #scope=instantiate_module -#switch = Cisco::WLC_5500 +#switch = Cisco::Cisco_WLC_AireOS diff --git a/conf/switches.conf.example b/conf/switches.conf.example index 6f248977fa51..5f8c3b3053de 100644 --- a/conf/switches.conf.example +++ b/conf/switches.conf.example @@ -32,7 +32,7 @@ VoIPLLDPDetect=N [192.168.1.0/24] description=Test Range WLC -type = Cisco::WLC +type = Cisco::Cisco_WLC_AireOS mode = production uplink_dynamic=0 VoIPLLDPDetect=N diff --git a/lib/pf/Switch.pm b/lib/pf/Switch.pm index 47ff9770b1d4..00b9b52f1c9b 100644 --- a/lib/pf/Switch.pm +++ b/lib/pf/Switch.pm @@ -4366,7 +4366,7 @@ sub generateAnsibleConfiguration { $vars{'switches'}{$switch_id}{'delete'} = $delete; switch($self->{'_type'}) { case /Cisco::ASA/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "cisco.asa" } - case /Cisco::WLC/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "aireos" } + case /Cisco::Cisco_WLC_AireOS/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "aireos" } case /Cisco::/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "cisco.ios.ios" } case /Aruba::CX/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "arubanetworks.aoscx.aoscx" } } diff --git a/lib/pf/Switch/Cisco/Aironet_WDS.pm b/lib/pf/Switch/Cisco/Aironet_WDS.pm index 3eb7ffde06d4..c908bdf11086 100644 --- a/lib/pf/Switch/Cisco/Aironet_WDS.pm +++ b/lib/pf/Switch/Cisco/Aironet_WDS.pm @@ -1,4 +1,5 @@ package pf::Switch::Cisco::Aironet_WDS; + =head1 NAME pf::Switch::Cisco::Aironet_WDS - Object oriented module to parse SNMP traps @@ -6,7 +7,7 @@ and manage Cisco Aironet configured in Wireless Domain Services (WDS) mode. =head1 STATUS -This module implements some changes on top of L. +This module implements some changes on top of L. You should also consult the documentation over there if you experience issues. 
=over @@ -52,7 +53,7 @@ use pf::log; use Net::SNMP; use Try::Tiny; -use base ('pf::Switch::Cisco::WLC'); +use base ('pf::Switch::Cisco::Cisco_WLC_AireOS'); use pf::util qw(format_mac_as_cisco); @@ -70,7 +71,7 @@ sub description { 'Cisco Aironet (WDS)' } De-authenticate a MAC address from wireless network (including 802.1x). -Diverges from L in the following aspects: +Diverges from L in the following aspects: =over diff --git a/lib/pf/Switch/Meraki/MR_v2.pm b/lib/pf/Switch/Meraki/MR_v2.pm index 8a2705a5ef5e..170df437578a 100644 --- a/lib/pf/Switch/Meraki/MR_v2.pm +++ b/lib/pf/Switch/Meraki/MR_v2.pm @@ -38,7 +38,7 @@ sub returnRoleAttribute { =item deauthenticateMacDefault -Some of the attributes from Cisco::WLC aren't necessary +Some of the attributes from Cisco::Cisco_WLC_AireOS aren't necessary =cut diff --git a/lib/pf/Switch/MockedSwitch.pm b/lib/pf/Switch/MockedSwitch.pm index cfcd17abc73f..45ba464d8492 100644 --- a/lib/pf/Switch/MockedSwitch.pm +++ b/lib/pf/Switch/MockedSwitch.pm @@ -3403,7 +3403,7 @@ sub generateAnsibleConfiguration { $vars{'switches'}{$switch_id}{'id'} = $switch_ip; switch($self->{'_type'}) { case /Cisco::ASA/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "cisco.asa" } - case /Cisco::WLC/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "aireos" } + case /Cisco::Cisco_WLC_AireOS/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "aireos" } case /Cisco::/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "cisco.ios.ios" } case /Aruba::CX/ { $vars{'switches'}{$switch_id}{'ansible_network_os'} = "arubanetworks.aoscx.aoscx" } } From a04fa4bdf62e630b47e5423643615aefa658eeeb Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 4 Dec 2024 09:43:15 -0500 Subject: [PATCH 154/176] Fixed typo --- lib/pf/mfa/Akamai.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pf/mfa/Akamai.pm b/lib/pf/mfa/Akamai.pm index 3c54e53ac354..eb65fa15d741 100644 --- a/lib/pf/mfa/Akamai.pm +++ b/lib/pf/mfa/Akamai.pm @@ 
-344,7 +344,7 @@ sub check_auth { my $post_fields = encode_json({tx => $infos->{'tx'}, user_input => $otp}); my ($return, $error) = $self->_get_curl("/api/v1/verify/check_auth?tx=".$infos->{'tx'}."&user_input=".$otp); if ($error) { - $message = "Error trying to verify the OTP code for user $username" + $message = "Error trying to verify the OTP code for user $username"; $logger->error($message); return $FALSE, $message; } From da1c3d176a59b0526277e49a46c4ceda17ab3201 Mon Sep 17 00:00:00 2001 From: Durand Fabrice Date: Wed, 4 Dec 2024 09:51:54 -0500 Subject: [PATCH 155/176] Added certificate_validity_time_from_unreg_date in pfappserver --- .../Form/Config/PKI_Provider/packetfence_pki.pm | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/html/pfappserver/lib/pfappserver/Form/Config/PKI_Provider/packetfence_pki.pm b/html/pfappserver/lib/pfappserver/Form/Config/PKI_Provider/packetfence_pki.pm index 6d7af2758c02..f69f39809db2 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/PKI_Provider/packetfence_pki.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/PKI_Provider/packetfence_pki.pm @@ -193,8 +193,17 @@ has_field 'revoke_on_unregistration' => ( }, ); +has_field 'certificate_validity_time_from_unreg_date' => ( + type => 'Checkbox', + checkbox_value => 'Y', + tags => { + after_element => \&help, + help => 'Enable to apply the same expiration date of the certificate as the unregistration date of the node.', + }, +); + has_block 'definition' => ( - render_list => [ qw(type proto host port username password profile country state organization cn_attribute cn_format revoke_on_unregistration ca_cert_path server_cert_path) ], + render_list => [ qw(type proto host port username password profile country state organization cn_attribute cn_format revoke_on_unregistration ca_cert_path server_cert_path certificate_validity_time_from_unreg_date) ], ); =head1 AUTHOR From 406aca7c691a82d4583a331e6fdb1aabc5071469 Mon Sep 17 00:00:00 2001 From:
Durand Fabrice Date: Wed, 4 Dec 2024 10:24:40 -0500 Subject: [PATCH 156/176] Fixed sql upgrade schema --- db/upgrade-X.X-X.Y.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/db/upgrade-X.X-X.Y.sql b/db/upgrade-X.X-X.Y.sql index 1f1f57f6bed9..8943523c6a89 100644 --- a/db/upgrade-X.X-X.Y.sql +++ b/db/upgrade-X.X-X.Y.sql @@ -56,7 +56,8 @@ ALTER TABLE `pki_profiles` ADD IF NOT EXISTS `allow_duplicated_cn` bigint(20) UNSIGNED DEFAULT 0, ADD IF NOT EXISTS `maximum_duplicated_cn` bigint(20) DEFAULT 0, MODIFY `scep_server_enabled` bigint(20) DEFAULT 0, - RENAME INDEX scep_server__id TO scep_server_id; + DROP INDEX IF EXISTS `scep_server__id`, + ADD INDEX IF NOT EXISTS `scep_server_id` (`scep_server_id`); \! echo "altering pki_certs" ALTER TABLE `pki_certs` From ba67d0d0d87e76890362a471cdd0ae278751ac25 Mon Sep 17 00:00:00 2001 From: Extra Fu Date: Wed, 4 Dec 2024 10:31:51 -0500 Subject: [PATCH 157/176] fix: upgrade mixpanel-browser from 2.55.1 to 2.56.0 (#8412) Snyk has created this PR to upgrade mixpanel-browser from 2.55.1 to 2.56.0. 
See this package in npm: mixpanel-browser See this project in Snyk: https://app.snyk.io/org/akamai-esg-pilot-org/project/08730ed5-aba9-49b2-868f-15e5e652ebcc?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot --- html/pfappserver/root/package-lock.json | 10 +++++----- html/pfappserver/root/package.json | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index ae6b28c95c5b..a4d8b59e3bed 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -10,7 +10,7 @@ "dependencies": { "@vue/composition-api": "^1.7.2", "autoprefixer": "10.4.5", - "axios": "^1.7.8", + "axios": "1.7.8", "bootstrap": "4.6.1", "bootstrap-vue": "2.23.1", "core-js": "3.37.1", @@ -19,7 +19,7 @@ "lodash": "^4.17.21", "messageformat": "^2.3.0", "mime-types": "^2.1.35", - "mixpanel-browser": "^2.55.1", + "mixpanel-browser": "^2.56.0", "papaparse": "5.4.1", "pinia": "^2.1.7", "plotly.js-dist-min": "^2.35.2", @@ -17567,9 +17567,9 @@ "license": "MIT" }, "node_modules/mixpanel-browser": { - "version": "2.55.1", - "resolved": "https://registry.npmjs.org/mixpanel-browser/-/mixpanel-browser-2.55.1.tgz", - "integrity": "sha512-NSEPdFSJxoR1OCKWKHbtqd3BeH1c9NjXbEt0tN5TgBEO1nSDji6niU9n4MopAXOP0POET9spjpQKxZtLZKTJwA==", + "version": "2.56.0", + "resolved": "https://registry.npmjs.org/mixpanel-browser/-/mixpanel-browser-2.56.0.tgz", + "integrity": "sha512-GYeEz58pV2M9MZtK8vSPL4oJmCwGS08FDDRZvZwr5VJpWdT4Lgyg6zXhmNfCmSTEIw2coaarm7HZ4FL9dAVvnA==", "license": "Apache-2.0", "dependencies": { "rrweb": "2.0.0-alpha.13" diff --git a/html/pfappserver/root/package.json b/html/pfappserver/root/package.json index 038ba65b08fe..8295df5b7da4 100644 --- a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -21,7 +21,7 @@ "lodash": "^4.17.21", "messageformat": "^2.3.0", "mime-types": "^2.1.35", - "mixpanel-browser": "^2.55.1", + 
"mixpanel-browser": "^2.56.0", "papaparse": "5.4.1", "pinia": "^2.1.7", "plotly.js-locales": "^2.32.0", From 32ff120638af0b9c669a26bdd828d726e715cd3d Mon Sep 17 00:00:00 2001 From: Extra Fu Date: Wed, 4 Dec 2024 10:32:26 -0500 Subject: [PATCH 158/176] fix: upgrade vue-eslint-parser from 9.4.2 to 9.4.3 (#8410) Snyk has created this PR to upgrade vue-eslint-parser from 9.4.2 to 9.4.3. See this package in npm: vue-eslint-parser See this project in Snyk: https://app.snyk.io/org/akamai-esg-pilot-org/project/08730ed5-aba9-49b2-868f-15e5e652ebcc?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot --- html/pfappserver/root/package-lock.json | 9 +++++---- html/pfappserver/root/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index a4d8b59e3bed..21929b05c871 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -30,7 +30,7 @@ "vue": "2.6.14", "vue-awesome": "^4.5.0", "vue-browser-acl": "^0.15.4", - "vue-eslint-parser": "^9.4.2", + "vue-eslint-parser": "^9.4.3", "vue-i18n": "^8.28.2", "vue-loading-overlay": "3.4.2", "vue-multiselect": "2.1.9", @@ -25694,9 +25694,10 @@ } }, "node_modules/vue-eslint-parser": { - "version": "9.4.2", - "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-9.4.2.tgz", - "integrity": "sha512-Ry9oiGmCAK91HrKMtCrKFWmSFWvYkpGglCeFAIqDdr9zdXmMMpJOmUJS7WWsW7fX81h6mwHmUZCQQ1E0PkSwYQ==", + "version": "9.4.3", + "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-9.4.3.tgz", + "integrity": "sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==", + "license": "MIT", "dependencies": { "debug": "^4.3.4", "eslint-scope": "^7.1.1", diff --git a/html/pfappserver/root/package.json b/html/pfappserver/root/package.json index 8295df5b7da4..16adc7c23a5e 100644 --- 
a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -32,7 +32,7 @@ "vue": "2.6.14", "vue-awesome": "^4.5.0", "vue-browser-acl": "^0.15.4", - "vue-eslint-parser": "^9.4.2", + "vue-eslint-parser": "^9.4.3", "vue-i18n": "^8.28.2", "vue-loading-overlay": "3.4.2", "vue-multiselect": "2.1.9", From db0e05798454f047b3120bd64f86834ddd395f88 Mon Sep 17 00:00:00 2001 From: Extra Fu Date: Wed, 4 Dec 2024 10:32:59 -0500 Subject: [PATCH 159/176] fix: upgrade core-js from 3.37.1 to 3.39.0 (#8409) Snyk has created this PR to upgrade core-js from 3.37.1 to 3.39.0. See this package in npm: core-js See this project in Snyk: https://app.snyk.io/org/akamai-esg-pilot-org/project/08730ed5-aba9-49b2-868f-15e5e652ebcc?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot --- html/pfappserver/root/package-lock.json | 8 ++++---- html/pfappserver/root/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index 21929b05c871..bc37e081f0a7 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -13,7 +13,7 @@ "axios": "1.7.8", "bootstrap": "4.6.1", "bootstrap-vue": "2.23.1", - "core-js": "3.37.1", + "core-js": "^3.39.0", "d3-force": "^3.0.0", "date-fns": "^1.30.1", "lodash": "^4.17.21", @@ -8212,9 +8212,9 @@ } }, "node_modules/core-js": { - "version": "3.37.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.37.1.tgz", - "integrity": "sha512-Xn6qmxrQZyB0FFY8E3bgRXei3lWDJHhvI+u0q9TKIYM49G8pAr0FgnnrFRAmsbptZL1yxRADVXn+x5AGsbBfyw==", + "version": "3.39.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.39.0.tgz", + "integrity": "sha512-raM0ew0/jJUqkJ0E6e8UDtl+y/7ktFivgWvqw8dNSQeNWoSDLvQ1H/RN3aPXB9tBd4/FhyR4RDPGhsNIMsAn7g==", "hasInstallScript": true, "license": "MIT", "funding": { diff --git a/html/pfappserver/root/package.json b/html/pfappserver/root/package.json 
index 16adc7c23a5e..23b1a80b5ba2 100644 --- a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -15,7 +15,7 @@ "axios": "1.7.8", "bootstrap": "4.6.1", "bootstrap-vue": "2.23.1", - "core-js": "3.37.1", + "core-js": "3.39.0", "d3-force": "^3.0.0", "date-fns": "^1.30.1", "lodash": "^4.17.21", From 7d7e88514ce2eddbb1887a55a1880d529ca4879b Mon Sep 17 00:00:00 2001 From: Extra Fu Date: Wed, 4 Dec 2024 10:33:29 -0500 Subject: [PATCH 160/176] fix: upgrade plotly.js-locales from 2.32.0 to 2.35.2 (#8408) Snyk has created this PR to upgrade plotly.js-locales from 2.32.0 to 2.35.2. See this package in npm: plotly.js-locales See this project in Snyk: https://app.snyk.io/org/akamai-esg-pilot-org/project/08730ed5-aba9-49b2-868f-15e5e652ebcc?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot --- html/pfappserver/root/package-lock.json | 9 +++++---- html/pfappserver/root/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index bc37e081f0a7..20ec4a0f27ea 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -23,7 +23,7 @@ "papaparse": "5.4.1", "pinia": "^2.1.7", "plotly.js-dist-min": "^2.35.2", - "plotly.js-locales": "^2.32.0", + "plotly.js-locales": "^2.35.2", "promised-debounce": "^0.4.2", "typeface-b612-mono": "1.1.13", "uuid": "^8.3.0", @@ -19189,9 +19189,10 @@ "license": "MIT" }, "node_modules/plotly.js-locales": { - "version": "2.32.0", - "resolved": "https://registry.npmjs.org/plotly.js-locales/-/plotly.js-locales-2.32.0.tgz", - "integrity": "sha512-XuUepYb2ouWRsOB5k5ZiHpZ14DmRF1pbvgPYjbSpG+cyzM0nxO9e9Jum3eWZS9sV6TvSdbPHLbH3KfJfFomkbQ==" + "version": "2.35.2", + "resolved": "https://registry.npmjs.org/plotly.js-locales/-/plotly.js-locales-2.35.2.tgz", + "integrity": 
"sha512-knVbtxP2hF1+LMAy0sxuv/rBCUGbr7fw5LRkH1/5L+ajmSwDuobppeYtLQSAnRwLP1bmMBWUQTYf+1cm/MhpQg==", + "license": "MIT" }, "node_modules/popper.js": { "version": "1.16.1", diff --git a/html/pfappserver/root/package.json b/html/pfappserver/root/package.json index 23b1a80b5ba2..76a5ed3fc48a 100644 --- a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -24,7 +24,7 @@ "mixpanel-browser": "^2.56.0", "papaparse": "5.4.1", "pinia": "^2.1.7", - "plotly.js-locales": "^2.32.0", + "plotly.js-locales": "^2.35.2", "plotly.js-dist-min": "^2.35.2", "promised-debounce": "^0.4.2", "typeface-b612-mono": "1.1.13", From 247a0dde216f584cb9f1cb2ab64539a132b86594 Mon Sep 17 00:00:00 2001 From: Extra Fu Date: Wed, 4 Dec 2024 10:37:07 -0500 Subject: [PATCH 161/176] fix: upgrade pinia from 2.1.7 to 2.2.6 (#8407) Snyk has created this PR to upgrade pinia from 2.1.7 to 2.2.6. See this package in npm: pinia See this project in Snyk: https://app.snyk.io/org/akamai-esg-pilot-org/project/08730ed5-aba9-49b2-868f-15e5e652ebcc?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot Co-authored-by: Satkunas <3904468+satkunas@users.noreply.github.com> --- html/pfappserver/root/package-lock.json | 29 ++++++++++++++----------- html/pfappserver/root/package.json | 2 +- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/html/pfappserver/root/package-lock.json b/html/pfappserver/root/package-lock.json index 20ec4a0f27ea..593f32b99d41 100644 --- a/html/pfappserver/root/package-lock.json +++ b/html/pfappserver/root/package-lock.json @@ -21,7 +21,7 @@ "mime-types": "^2.1.35", "mixpanel-browser": "^2.56.0", "papaparse": "5.4.1", - "pinia": "^2.1.7", + "pinia": "^2.2.6", "plotly.js-dist-min": "^2.35.2", "plotly.js-locales": "^2.35.2", "promised-debounce": "^0.4.2", @@ -5608,9 +5608,10 @@ } }, "node_modules/@vue/devtools-api": { - "version": "6.6.1", - "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.1.tgz", - 
"integrity": "sha512-LgPscpE3Vs0x96PzSSB4IGVSZXZBZHpfxs+ZA1d+VEPwHdOXowy/Y2CsvCAIFrf+ssVU1pD1jidj505EpUnfbA==" + "version": "6.6.4", + "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.4.tgz", + "integrity": "sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==", + "license": "MIT" }, "node_modules/@vue/eslint-config-standard": { "version": "7.0.0", @@ -18986,12 +18987,13 @@ } }, "node_modules/pinia": { - "version": "2.1.7", - "resolved": "https://registry.npmjs.org/pinia/-/pinia-2.1.7.tgz", - "integrity": "sha512-+C2AHFtcFqjPih0zpYuvof37SFxMQ7OEG2zV9jRI12i9BOy3YQVAHwdKtyyc8pDcDyIc33WCIsZaCFWU7WWxGQ==", + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/pinia/-/pinia-2.2.6.tgz", + "integrity": "sha512-vIsR8JkDN5Ga2vAxqOE2cJj4VtsHnzpR1Fz30kClxlh0yCHfec6uoMeM3e/ddqmwFUejK3NlrcQa/shnpyT4hA==", + "license": "MIT", "dependencies": { - "@vue/devtools-api": "^6.5.0", - "vue-demi": ">=0.14.5" + "@vue/devtools-api": "^6.6.3", + "vue-demi": "^0.14.10" }, "funding": { "url": "https://github.com/sponsors/posva" @@ -18999,7 +19001,7 @@ "peerDependencies": { "@vue/composition-api": "^1.4.0", "typescript": ">=4.4.4", - "vue": "^2.6.14 || ^3.3.0" + "vue": "^2.6.14 || ^3.5.11" }, "peerDependenciesMeta": { "@vue/composition-api": { @@ -25670,10 +25672,11 @@ } }, "node_modules/vue-demi": { - "version": "0.14.7", - "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.7.tgz", - "integrity": "sha512-EOG8KXDQNwkJILkx/gPcoL/7vH+hORoBaKgGe+6W7VFMvCYJfmF2dGbvgDroVnI8LU7/kTu8mbjRZGBU1z9NTA==", + "version": "0.14.10", + "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.10.tgz", + "integrity": "sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==", "hasInstallScript": true, + "license": "MIT", "bin": { "vue-demi-fix": "bin/vue-demi-fix.js", "vue-demi-switch": "bin/vue-demi-switch.js" diff --git a/html/pfappserver/root/package.json 
b/html/pfappserver/root/package.json index 76a5ed3fc48a..a4103b424ae2 100644 --- a/html/pfappserver/root/package.json +++ b/html/pfappserver/root/package.json @@ -23,7 +23,7 @@ "mime-types": "^2.1.35", "mixpanel-browser": "^2.56.0", "papaparse": "5.4.1", - "pinia": "^2.1.7", + "pinia": "^2.2.6", "plotly.js-locales": "^2.35.2", "plotly.js-dist-min": "^2.35.2", "promised-debounce": "^0.4.2", From 1ede18c659803e5b112dd65a0836347fea032414 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Wed, 4 Dec 2024 15:38:14 +0000 Subject: [PATCH 162/176] update NEWS --- NEWS.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index bf12983b88a4..9fb589fc813f 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -62,7 +62,7 @@ For a list of compatibility related changes see the < Date: Wed, 4 Dec 2024 14:41:39 -0500 Subject: [PATCH 163/176] Fixes #8403 --- lib/pf/ssl.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pf/ssl.pm b/lib/pf/ssl.pm index 3a4a63c5389b..b9c6ccacc85c 100644 --- a/lib/pf/ssl.pm +++ b/lib/pf/ssl.pm @@ -215,7 +215,7 @@ sub fetch_all_intermediates { } my $ca_info = $ca_info_ext->to_string; - if($ca_info =~ /CA Issuers\s*-\s*URI:(.*)\n/) { + if($ca_info =~ /CA Issuers\s*-\s*URI:(.*)/ms) { my $url = $1; get_logger->info("Downloading certificate at $url"); From 331b7b8397088842022e9135e286f167919b10f8 Mon Sep 17 00:00:00 2001 From: Zhihao Ma Date: Wed, 4 Dec 2024 16:37:00 -0500 Subject: [PATCH 164/176] increase ntlm backend respond time to 5s --- go/ntlm/ntlm.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/go/ntlm/ntlm.go b/go/ntlm/ntlm.go index f47ae1365d87..798c119167b9 100644 --- a/go/ntlm/ntlm.go +++ b/go/ntlm/ntlm.go @@ -25,7 +25,7 @@ func CheckMachineAccountPassword(ctx context.Context, backendPort string) (bool, url := "http://containers-gateway.internal:" + backendPort + "/ntlm/connect" client := &http.Client{ - Timeout: 2 * time.Second, + Timeout: 5 * 
time.Second, } response, err := client.Get(url) if err != nil { @@ -48,7 +48,7 @@ func CheckMachineAccountWithGivenPassword(ctx context.Context, backendPort strin url := "http://containers-gateway.internal:" + backendPort + "/ntlm/connect" client := &http.Client{ - Timeout: 2 * time.Second, + Timeout: 5 * time.Second, } jsonData := map[string]string{ @@ -78,7 +78,7 @@ func ReportMSEvent(ctx context.Context, backendPort string, jsonData any) error url := "http://containers-gateway.internal:" + backendPort + "/event/report" client := &http.Client{ - Timeout: 2 * time.Second, + Timeout: 5 * time.Second, } jsonBytes, _ := json.Marshal(jsonData) From 26c7f8d6cd38d1c72c2ffec5158af42c2fd1aeb0 Mon Sep 17 00:00:00 2001 From: James Rouzier Date: Thu, 5 Dec 2024 09:42:31 -0500 Subject: [PATCH 165/176] feature/kakfa-config (#8421) * Update fields * Add controller for kafka config * register routes * Implement get * Update form * New fields * Add tests * Fix field definition * remove redundant import * Formatting of data * Implement PATCH * Add test * Clear config * update tests * Add options * Check if controller can handle primary_key * update OAS * add Kafka configuration to UI * add service * fix indentation --------- Co-authored-by: Darren+Satkunas --- docs/api/spec/openapi.json | 957 +++++++++++++++--- docs/api/spec/openapi.yaml | 709 +++++++++++-- .../lib/pfappserver/Form/Config/Kafka.pm | 106 ++ .../pfappserver/Form/Config/Pfcron/pfflow.pm | 40 + .../lib/pfappserver/Form/Field/NameVal.pm | 63 ++ .../lib/pfappserver/Form/Field/UserPass.pm | 59 ++ .../src/components/new/BaseFormGroupArray.vue | 13 +- .../root/src/composables/useInputValidator.js | 5 +- .../src/views/Configuration/_router/index.js | 8 +- .../root/src/views/Configuration/index.vue | 6 +- .../src/views/Configuration/kafka/_api.js | 20 + .../kafka/_components/BaseAuth.vue | 117 +++ .../kafka/_components/BaseClusterConfig.vue | 115 +++ .../kafka/_components/BaseFormGroupAuths.js | 35 + 
.../_components/BaseFormGroupClusterConfig.js | 35 + .../_components/BaseFormGroupHostConfig.js | 35 + .../_components/BaseFormGroupHostConfigs.js | 35 + .../_components/BaseFormGroupIptables.js | 28 + .../kafka/_components/BaseHostConfig.vue | 117 +++ .../_components/BaseHostConfigConfig.vue | 115 +++ .../kafka/_components/TheForm.vue | 95 ++ .../kafka/_components/TheView.js | 27 + .../Configuration/kafka/_components/index.js | 27 + .../kafka/_composables/useResource.js | 23 + .../src/views/Configuration/kafka/_router.js | 25 + .../src/views/Configuration/kafka/_store.js | 94 ++ .../src/views/Configuration/kafka/config.js | 34 + .../src/views/Configuration/kafka/schema.js | 52 + .../maintenanceTasks/_components/TheForm.vue | 59 ++ .../maintenanceTasks/_components/index.js | 9 + lib/pf/UnifiedApi.pm | 9 + lib/pf/UnifiedApi/Controller/Config.pm | 1 - lib/pf/UnifiedApi/Controller/Config/Kafka.pm | 684 +++++++++++++ lib/pf/UnifiedApi/OpenAPI/Generator/Config.pm | 12 +- .../UnifiedApi/Controller/Config/Kafka.t | 299 ++++++ 35 files changed, 3777 insertions(+), 291 deletions(-) create mode 100644 html/pfappserver/lib/pfappserver/Form/Config/Kafka.pm create mode 100644 html/pfappserver/lib/pfappserver/Form/Field/NameVal.pm create mode 100644 html/pfappserver/lib/pfappserver/Form/Field/UserPass.pm create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_api.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseAuth.vue create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseClusterConfig.vue create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupAuths.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupClusterConfig.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfig.js create mode 100644 
html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfigs.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupIptables.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfig.vue create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfigConfig.vue create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/TheForm.vue create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/TheView.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_components/index.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_composables/useResource.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_router.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/_store.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/config.js create mode 100644 html/pfappserver/root/src/views/Configuration/kafka/schema.js create mode 100644 lib/pf/UnifiedApi/Controller/Config/Kafka.pm create mode 100644 t/unittest/UnifiedApi/Controller/Config/Kafka.t diff --git a/docs/api/spec/openapi.json b/docs/api/spec/openapi.json index 6bbc87105de1..a53b0bee4c2f 100644 --- a/docs/api/spec/openapi.json +++ b/docs/api/spec/openapi.json @@ -3523,38 +3523,8 @@ "properties" : { "meta" : { "properties" : { - "emailaddr" : { - "$ref" : "#/components/schemas/Meta" - }, - "fromaddr" : { - "$ref" : "#/components/schemas/Meta" - }, "id" : { "$ref" : "#/components/schemas/Meta" - }, - "smtp_encryption" : { - "$ref" : "#/components/schemas/Meta" - }, - "smtp_password" : { - "$ref" : "#/components/schemas/Meta" - }, - "smtp_port" : { - "$ref" : "#/components/schemas/Meta" - }, - "smtp_timeout" : { - "$ref" : "#/components/schemas/Meta" - }, - "smtp_username" : { - "$ref" : "#/components/schemas/Meta" - }, - 
"smtp_verifyssl" : { - "$ref" : "#/components/schemas/Meta" - }, - "smtpserver" : { - "$ref" : "#/components/schemas/Meta" - }, - "subjectprefix" : { - "$ref" : "#/components/schemas/Meta" } }, "type" : "object" @@ -4555,6 +4525,10 @@ "description" : "The IPv4 of the Active Directory server", "type" : "string" }, + "additional_machine_accounts" : { + "description" : "How many additional machine accounts should be created and used to parallel NTLM authentication", + "type" : "integer" + }, "bind_dn" : { "description" : "The username of a Domain Admin to use to join the server to the domain", "type" : "string" @@ -4693,6 +4667,9 @@ "ad_server" : { "$ref" : "#/components/schemas/Meta" }, + "additional_machine_accounts" : { + "$ref" : "#/components/schemas/Meta" + }, "bind_dn" : { "$ref" : "#/components/schemas/Meta" }, @@ -4813,22 +4790,22 @@ "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeSecurityOnion" }, { - "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeSnort" + "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeNexpose" }, { - "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeSuricataMd5" + "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeDhcp" }, { - "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeRegex" + "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeSuricataMd5" }, { - "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeNexpose" + "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeSnort" }, { - "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeDhcp" + "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeFortianalyser" }, { - "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeFortianalyser" + "$ref" : "#/components/schemas/ConfigEventHandlerMetaSubTypeRegex" } ] }, @@ -4917,6 +4894,9 @@ "path" : { "$ref" : "#/components/schemas/Meta" }, + "rate_limit" : { + "$ref" : "#/components/schemas/Meta" + }, "rules" : { "$ref" : 
"#/components/schemas/Meta" }, @@ -5165,6 +5145,20 @@ "description" : "Alert pipe", "type" : "string" }, + "rate_limit" : { + "description" : "Rate limit requests.", + "properties" : { + "interval" : { + "description" : "Interval", + "type" : "integer" + }, + "unit" : { + "description" : "Unit", + "type" : "string" + } + }, + "type" : "object" + }, "rules" : { "description" : "Rules", "items" : { @@ -5849,43 +5843,43 @@ "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeDeny" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileconfig" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeWindows" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAccept" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeLookup" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeIntune" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAccept" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeJamf" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeDpsk" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAirwatch" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileiron" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeWindows" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeIntune" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeLookup" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeJamf" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeGoogleWorkspaceChromebook" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeKandji" 
}, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeSentinelone" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAndroid" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAndroid" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAirwatch" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileiron" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileconfig" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeDpsk" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeGoogleWorkspaceChromebook" }, { - "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeKandji" + "$ref" : "#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeSentinelone" } ] }, @@ -8294,37 +8288,37 @@ "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeJunipersrx" }, { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeContentkeeper" + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeJsonrpc" }, { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeFortigate" + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypePaloalto" }, { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeBarracudang" + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeContentkeeper" }, { "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeSmoothwall" }, { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypePaloalto" - }, - { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeJsonrpc" + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeWatchguard" }, { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeCheckpoint" + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeLightspeedrocket" }, { "$ref" : 
"#/components/schemas/ConfigFirewallMetaSubTypeFamilyzone" }, { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeLightspeedrocket" + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeBarracudang" }, { - "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeWatchguard" + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeCheckpoint" }, { "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeIboss" + }, + { + "$ref" : "#/components/schemas/ConfigFirewallMetaSubTypeFortigate" } ] }, @@ -8332,6 +8326,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8356,6 +8353,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8378,6 +8384,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8402,6 +8411,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8427,6 +8445,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8451,6 +8472,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : 
"#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8473,6 +8503,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8497,6 +8530,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8519,6 +8561,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8546,6 +8591,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8568,6 +8622,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8592,6 +8649,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8614,6 +8680,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8641,6 +8710,15 @@ "port" 
: { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8663,6 +8741,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8687,6 +8768,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8712,6 +8802,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8736,6 +8829,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8761,6 +8863,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8785,6 +8890,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8807,6 +8921,9 @@ "properties" : { "meta" : { "properties" : { + 
"act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8831,6 +8948,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "transport" : { "$ref" : "#/components/schemas/Meta" }, @@ -8859,6 +8985,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8883,6 +9012,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8905,6 +9043,9 @@ "properties" : { "meta" : { "properties" : { + "act_on_accounting_stop" : { + "$ref" : "#/components/schemas/Meta" + }, "cache_timeout" : { "$ref" : "#/components/schemas/Meta" }, @@ -8929,6 +9070,15 @@ "port" : { "$ref" : "#/components/schemas/Meta" }, + "sso_on_access_reevaluation" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_accounting" : { + "$ref" : "#/components/schemas/Meta" + }, + "sso_on_dhcp" : { + "$ref" : "#/components/schemas/Meta" + }, "type" : { "$ref" : "#/components/schemas/Meta" }, @@ -8949,6 +9099,10 @@ }, "ConfigFirewallSubTypeBarracudang" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -8985,6 +9139,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "BarracudaNG", "description" : "Discriminator `BarracudaNG`", @@ -9016,6 +9182,10 @@ }, "ConfigFirewallSubTypeCheckpoint" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9052,6 +9222,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "Checkpoint", "description" : "Discriminator `Checkpoint`", @@ -9078,6 +9260,10 @@ }, "ConfigFirewallSubTypeContentkeeper" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9114,6 +9300,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "ContentKeeper", "description" : "Discriminator `ContentKeeper`", @@ -9140,6 +9338,10 @@ }, "ConfigFirewallSubTypeFamilyzone" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9180,6 +9382,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "FamilyZone", "description" : "Discriminator `FamilyZone`", @@ -9205,6 +9419,10 @@ }, "ConfigFirewallSubTypeFortigate" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9241,6 +9459,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "FortiGate", "description" : "Discriminator `FortiGate`", @@ -9267,6 +9497,10 @@ }, "ConfigFirewallSubTypeIboss" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9307,6 +9541,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "Iboss", "description" : "Discriminator `Iboss`", @@ -9333,6 +9579,10 @@ }, "ConfigFirewallSubTypeJsonrpc" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9369,6 +9619,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "JSONRPC", "description" : "Discriminator `JSONRPC`", @@ -9400,6 +9662,10 @@ }, "ConfigFirewallSubTypeJunipersrx" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9436,6 +9702,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "JuniperSRX", "description" : "Discriminator `JuniperSRX`", @@ -9467,6 +9745,10 @@ }, "ConfigFirewallSubTypeLightspeedrocket" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9503,6 +9785,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "LightSpeedRocket", "description" : "Discriminator `LightSpeedRocket`", @@ -9529,6 +9823,10 @@ }, "ConfigFirewallSubTypePaloalto" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9565,6 +9863,18 @@ "description" : "If you use an alternative port, please specify. This parameter is ignored when the Syslog transport is selected.", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "transport" : { "description" : "Transport", "type" : "string" @@ -9598,6 +9908,10 @@ }, "ConfigFirewallSubTypeSmoothwall" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9634,6 +9948,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "SmoothWall", "description" : "Discriminator `SmoothWall`", @@ -9660,6 +9986,10 @@ }, "ConfigFirewallSubTypeWatchguard" : { "properties" : { + "act_on_accounting_stop" : { + "description" : "Act on accounting stop", + "type" : "string" + }, "cache_timeout" : { "description" : "Adjust the \"Cache timeout\" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value.", "type" : "integer" @@ -9696,6 +10026,18 @@ "description" : "If you use an alternative port, please specify", "type" : "integer" }, + "sso_on_access_reevaluation" : { + "description" : "Sso on access reevaluation", + "type" : "string" + }, + "sso_on_accounting" : { + "description" : "Sso on accounting", + "type" : "string" + }, + "sso_on_dhcp" : { + "description" : "Sso on dhcp", + "type" : "string" + }, "type" : { "default" : "WatchGuard", "description" : "Discriminator `WatchGuard`", @@ -10009,6 +10351,169 @@ }, "type" : "object" }, + "ConfigKafka" : { + "properties" : { + "admin" : { + "description" : "Admin", + "properties" : { + "pass" : { + "description" : "Password", + "type" : "string" + }, + "user" : { + "description" : "User", + "type" : "string" + } + }, + "type" : "object" + }, + "auths" : { + "description" : "Auths", + "items" : { + "description" : "Auth", + "properties" : { + "pass" : { + "description" : "Password", + "type" : "string" + }, + "user" : { + "description" : "User", + "type" : "string" + } + }, + "type" : "object" + }, + "type" : "array" + }, + "cluster" : { + "description" : "Cluster", + "items" : { + "description" : "Cluster", + "properties" : { + "name" : { + "description" : "Name", + "type" : "string" + }, + "value" : { + "description" : "Value", + "type" : "string" + } + }, + "type" : "object" + }, + "type" : "array" + }, + "host_configs" : { + "description" : "Host configs", + "items" : { + "description" : "Host config", + "properties" : { + "config" : { + "description" : "Config", + "items" : { + "description" : "Config", + "properties" : { + "name" : { + "description" : "Name", + "type" : "string" + }, + "value" : { + "description" : "Value", + "type" : "string" + } + }, + "type" : "object" + }, + "type" : "array" + }, + "host" : { + "description" : "Host", + "type" : "string" + } + }, + "type" : "object" + }, + "type" : "array" + }, + "iptables" : { + "description" : 
"Iptables", + "properties" : { + "clients" : { + "description" : "Clients", + "items" : { + "description" : "Client", + "type" : "string" + }, + "type" : "array" + }, + "cluster_ips" : { + "description" : "Cluster ips", + "items" : { + "description" : "Cluster ip", + "type" : "string" + }, + "type" : "array" + } + }, + "type" : "object" + } + }, + "type" : "object" + }, + "ConfigKafkaList" : { + "allOf" : [ + { + "$ref" : "#/components/schemas/Iterable" + }, + { + "properties" : { + "items" : { + "items" : { + "$ref" : "#/components/schemas/ConfigKafka" + }, + "type" : "array" + } + }, + "type" : "object" + } + ] + }, + "ConfigKafkaMeta" : { + "properties" : { + "meta" : { + "properties" : { + "admin" : { + "$ref" : "#/components/schemas/Meta" + }, + "auths" : { + "$ref" : "#/components/schemas/Meta" + }, + "cluster" : { + "$ref" : "#/components/schemas/Meta" + }, + "host_configs" : { + "$ref" : "#/components/schemas/Meta" + }, + "iptables" : { + "$ref" : "#/components/schemas/Meta" + } + }, + "type" : "object" + } + }, + "type" : "object" + }, + "ConfigKafkaWrapped" : { + "properties" : { + "item" : { + "$ref" : "#/components/schemas/ConfigKafka" + }, + "status" : { + "type" : "integer" + } + }, + "type" : "object" + }, "ConfigL2Network" : { "properties" : { "algorithm" : { @@ -10213,91 +10718,91 @@ }, "oneOf" : [ { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeOption82Query" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodesMaintenance" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePersonCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeCleanupChiDatabaseCache" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeIp6logCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePasswordOfTheDay" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeDnsAuditLogCleanup" + "$ref" : 
"#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFingerbankDataUpdate" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSwitchCacheLldplocalportDescription" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeLocationlogCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeCleanupChiDatabaseCache" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAdminApiAuditLogCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAuthLogCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeClusterCheck" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeProvisioningCompliancePoll" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFlushRadiusAuditLog" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeClusterCheck" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSecurityEventMaintenance" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeCertificatesCheck" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePkiCertificatesCheck" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeIp6logCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctMaintenance" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePurgeBinaryLogs" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeRadiusAuditLogCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePersonCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePfflow" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeProvisioningCompliancePoll" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFlushDnsAuditLog" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAuthLogCleanup" 
}, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeBandwidthMaintenance" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeRadiusAuditLogCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFingerbankDataUpdate" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeIp4logCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePurgeBinaryLogs" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeUbiquitiApMacToIp" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodesMaintenance" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctMaintenance" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeCertificatesCheck" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCurrentSessionCleanup" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAdminApiAuditLogCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeBandwidthMaintenance" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCurrentSessionCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeOption82Query" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSwitchCacheLldplocalportDescription" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFlushRadiusAuditLog" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSecurityEventMaintenance" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFlushDnsAuditLog" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePasswordOfTheDay" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeDnsAuditLogCleanup" }, { - "$ref" : 
"#/components/schemas/ConfigMaintenanceTaskMetaSubTypeIp4logCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePkiCertificatesCheck" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeUbiquitiApMacToIp" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypePfflow" }, { - "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeLocationlogCleanup" + "$ref" : "#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctCleanup" } ] }, @@ -10929,15 +11434,27 @@ "properties" : { "meta" : { "properties" : { + "filter_events" : { + "$ref" : "#/components/schemas/Meta" + }, "group_id" : { "$ref" : "#/components/schemas/Meta" }, + "heuristics" : { + "$ref" : "#/components/schemas/Meta" + }, "id" : { "$ref" : "#/components/schemas/Meta" }, "kafka_brokers" : { "$ref" : "#/components/schemas/Meta" }, + "kakfa_pass" : { + "$ref" : "#/components/schemas/Meta" + }, + "kakfa_user" : { + "$ref" : "#/components/schemas/Meta" + }, "read_topic" : { "$ref" : "#/components/schemas/Meta" }, @@ -10955,6 +11472,9 @@ }, "type" : { "$ref" : "#/components/schemas/Meta" + }, + "uuid" : { + "$ref" : "#/components/schemas/Meta" } }, "type" : "object" @@ -12231,10 +12751,18 @@ }, "ConfigMaintenanceTaskSubTypePfflow" : { "properties" : { + "filter_events" : { + "description" : "Filter incoming events", + "type" : "string" + }, "group_id" : { "description" : "The Kafka Consumer Group ID ", "type" : "string" }, + "heuristics" : { + "description" : "Heuristics", + "type" : "string" + }, "id" : { "description" : "Pfcron Name", "type" : "string" @@ -12243,6 +12771,14 @@ "description" : "Kafka Brokers", "type" : "string" }, + "kakfa_pass" : { + "description" : "Kafka Password", + "type" : "string" + }, + "kakfa_user" : { + "description" : "Kafka Username", + "type" : "string" + }, "read_topic" : { "description" : "The Kafka topic to read pfflows from", "type" : "string" @@ -12267,6 +12803,10 @@ "default" : "pfflow", "description" : "Discriminator 
`pfflow`", "type" : "string" + }, + "uuid" : { + "description" : "UUID ", + "type" : "string" } }, "required" : [ @@ -13493,97 +14033,97 @@ }, "oneOf" : [ { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeProvisioning" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthFacebook" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeSelectrole" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeChained" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthFacebook" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationNull" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationNull" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationEmail" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeSslInspection" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationChoice" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGithub" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeSurvey" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeMessage" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSponsor" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeUrl" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSms" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthOpenid" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeMfa" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationChoice" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeRootsso" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthLinkedin" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeShowlocalaccount" }, { - "$ref" : 
"#/components/schemas/ConfigPortalModuleMetaSubTypeSurvey" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthLinkedin" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationPassword" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeUrl" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGoogle" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGithub" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeChoice" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeSelectrole" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeShowlocalaccount" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSaml" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeFixedrole" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBlackhole" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauth" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeRoot" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeRootsso" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeChoice" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSponsor" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationPassword" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeMfa" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBilling" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSaml" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthOpenid" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeRoot" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeMessage" }, { - "$ref" : 
"#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationEmail" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGoogle" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationLogin" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauth" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSms" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeSslInspection" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBlackhole" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeFixedrole" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthentication" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationLogin" }, { "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthWindowslive" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBilling" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeAuthentication" }, { - "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeChained" + "$ref" : "#/components/schemas/ConfigPortalModuleMetaSubTypeProvisioning" } ] }, @@ -16811,46 +17351,46 @@ }, "oneOf" : [ { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeAccept" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeDeny" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeMobileconfig" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeWindows" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeDeny" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeMobileconfig" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeSentinelone" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeAirwatch" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeWindows" + "$ref" : 
"#/components/schemas/ConfigProvisioningMetaSubTypeKandji" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeGoogleWorkspaceChromebook" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeAndroid" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeAirwatch" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeGoogleWorkspaceChromebook" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeJamf" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeJamfcloud" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeIntune" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeSentinelone" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeMobileiron" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeAccept" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeAndroid" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeDpsk" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeJamfcloud" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeMobileiron" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeKandji" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeJamf" }, { - "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeDpsk" + "$ref" : "#/components/schemas/ConfigProvisioningMetaSubTypeIntune" } ] }, @@ -21567,97 +22107,97 @@ "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeOpenid" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeWindowslive" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeStripe" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeBlackhole" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeAd" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeHtpasswd" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeBlackhole" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeHttp" + "$ref" : 
"#/components/schemas/ConfigSourceMetaSubTypeAzuread" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEdir" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeWindowslive" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEmail" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeKickbox" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeNull" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSms" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeAd" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSaml" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSaml" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeHtpasswd" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeAzuread" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSponsoremail" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeAdminproxy" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSql" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeAuthorization" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeNull" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeRadius" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeHttp" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypePotd" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeAdminproxy" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeFacebook" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEmail" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeGoogle" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEduroam" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeLdap" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypePotd" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeGoogleworkspaceldap" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeRadius" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSql" + 
"$ref" : "#/components/schemas/ConfigSourceMetaSubTypeGithub" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeStripe" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeGoogle" }, { "$ref" : "#/components/schemas/ConfigSourceMetaSubTypePaypal" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeKickbox" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeKerberos" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSms" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeAuthorization" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeLinkedin" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEdir" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeTwilio" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeFacebook" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeGithub" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeLdap" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeKerberos" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeTwilio" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEaptls" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeClickatell" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeClickatell" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEaptls" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeSponsoremail" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeLinkedin" }, { - "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeEduroam" + "$ref" : "#/components/schemas/ConfigSourceMetaSubTypeGoogleworkspaceldap" } ] }, @@ -31220,10 +31760,10 @@ }, "oneOf" : [ { - "$ref" : "#/components/schemas/ConfigSyslogForwarderMetaSubTypeServer" + "$ref" : "#/components/schemas/ConfigSyslogForwarderMetaSubTypeFile" }, { - "$ref" : "#/components/schemas/ConfigSyslogForwarderMetaSubTypeFile" + "$ref" : "#/components/schemas/ConfigSyslogForwarderMetaSubTypeServer" } ] }, @@ -51394,6 
+51934,95 @@ ] } }, + "/api/v1/config/kafka" : { + "description" : "pf::UnifiedApi::Controller::Config::Kafka\n\n field_meta\n Get a field's meta data\n\n field_type\n Find the field type\n\n field_is_required\n Check if the field is required\n\n field_placeholder\n Get the placeholder for the field\n\n field_resource_placeholder\n The place holder for the field\n\n field_meta_array_items\n Get the meta for the items of the array\n\n field_allowed\n The allowed fields\n\n field_default\n Get the default value of a field\n\n field_extra_meta\n Get the extra meta data for a field\n\n field_meta_object_properties\n Get the properties of a field\n\n field_text_meta\n Update text field meta data\n\n field_allowed_lookup\n field_allowed_lookup\n\n map_options\n map_options\n\n map_option\n map_option\n\n format_form_errors\n format_form_errors", + "get" : { + "description" : "Get an item.", + "operationId" : "api.v1.Config.Kafka.get", + "parameters" : [], + "responses" : { + "200" : { + "content" : { + "application/json" : { + "schema" : { + "$ref" : "#/components/schemas/ConfigKafkaWrapped" + } + } + }, + "description" : "Request successful. Response contains a specific resource." + }, + "401" : { + "$ref" : "#/components/responses/Forbidden" + }, + "404" : { + "$ref" : "#/components/responses/NotFound" + } + }, + "tags" : [ + "Config/Kafka" + ] + }, + "options" : { + "description" : "Get meta for a new item.", + "operationId" : "api.v1.Config.Kafka.options", + "parameters" : [], + "responses" : { + "200" : { + "content" : { + "application/json" : { + "schema" : { + "$ref" : "#/components/schemas/ConfigKafkaMeta" + } + } + }, + "description" : "Request successful. Response contains meta for a resource." 
+ }, + "401" : { + "$ref" : "#/components/responses/Forbidden" + }, + "404" : { + "$ref" : "#/components/responses/NotFound" + } + }, + "tags" : [ + "Config/Kafka" + ] + }, + "patch" : { + "description" : "Update an item.", + "operationId" : "api.v1.Config.Kafka.update", + "parameters" : [], + "requestBody" : { + "content" : { + "application/json" : { + "schema" : { + "$ref" : "#/components/schemas/ConfigKafka" + } + } + } + }, + "responses" : { + "201" : { + "$ref" : "#/components/responses/Updated" + }, + "400" : { + "$ref" : "#/components/responses/BadRequest" + }, + "401" : { + "$ref" : "#/components/responses/Forbidden" + }, + "404" : { + "$ref" : "#/components/responses/NotFound" + }, + "422" : { + "$ref" : "#/components/responses/UnprocessableEntity" + } + }, + "tags" : [ + "Config/Kafka" + ] + } + }, "/api/v1/config/l2_network/{network_id}" : { "description" : "pf::UnifiedApi::Controller::Config::L2Networks", "get" : { diff --git a/docs/api/spec/openapi.yaml b/docs/api/spec/openapi.yaml index 9afad2c612d4..fbb1abf9fd46 100644 --- a/docs/api/spec/openapi.yaml +++ b/docs/api/spec/openapi.yaml @@ -2306,28 +2306,8 @@ components: properties: meta: properties: - emailaddr: - $ref: '#/components/schemas/Meta' - fromaddr: - $ref: '#/components/schemas/Meta' id: $ref: '#/components/schemas/Meta' - smtp_encryption: - $ref: '#/components/schemas/Meta' - smtp_password: - $ref: '#/components/schemas/Meta' - smtp_port: - $ref: '#/components/schemas/Meta' - smtp_timeout: - $ref: '#/components/schemas/Meta' - smtp_username: - $ref: '#/components/schemas/Meta' - smtp_verifyssl: - $ref: '#/components/schemas/Meta' - smtpserver: - $ref: '#/components/schemas/Meta' - subjectprefix: - $ref: '#/components/schemas/Meta' type: object type: object ConfigBaseWrapped: @@ -3049,6 +3029,10 @@ components: ad_server: description: The IPv4 of the Active Directory server type: string + additional_machine_accounts: + description: How many additional machine accounts should be created and + 
used to parallel NTLM authentication + type: integer bind_dn: description: The username of a Domain Admin to use to join the server to the domain @@ -3165,6 +3149,8 @@ components: $ref: '#/components/schemas/Meta' ad_server: $ref: '#/components/schemas/Meta' + additional_machine_accounts: + $ref: '#/components/schemas/Meta' bind_dn: $ref: '#/components/schemas/Meta' bind_pass: @@ -3240,12 +3226,12 @@ components: oneOf: - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeSuricata' - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeSecurityOnion' - - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeSnort' - - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeSuricataMd5' - - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeRegex' - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeNexpose' - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeDhcp' + - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeSuricataMd5' + - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeSnort' - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeFortianalyser' + - $ref: '#/components/schemas/ConfigEventHandlerMetaSubTypeRegex' ConfigEventHandlerMetaSubTypeDhcp: properties: meta: @@ -3302,6 +3288,8 @@ components: $ref: '#/components/schemas/Meta' path: $ref: '#/components/schemas/Meta' + rate_limit: + $ref: '#/components/schemas/Meta' rules: $ref: '#/components/schemas/Meta' status: @@ -3472,6 +3460,16 @@ components: path: description: Alert pipe type: string + rate_limit: + description: Rate limit requests. 
+ properties: + interval: + description: Interval + type: integer + unit: + description: Unit + type: string + type: object rules: description: Rules items: @@ -3947,19 +3945,19 @@ components: propertyName: type oneOf: - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeDeny' - - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileconfig' + - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeWindows' + - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeLookup' - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAccept' + - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeDpsk' + - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileiron' - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeIntune' - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeJamf' + - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeKandji' + - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAndroid' - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAirwatch' - - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeWindows' - - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeLookup' + - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileconfig' - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeGoogleWorkspaceChromebook' - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeSentinelone' - - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeAndroid' - - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeMobileiron' - - $ref: 
'#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeDpsk' - - $ref: '#/components/schemas/ConfigFilterEnginesProvisioningFilterMetaSubTypeKandji' ConfigFilterEnginesProvisioningFilterMetaSubTypeAccept: properties: meta: @@ -5654,21 +5652,23 @@ components: propertyName: type oneOf: - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeJunipersrx' + - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeJsonrpc' + - $ref: '#/components/schemas/ConfigFirewallMetaSubTypePaloalto' - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeContentkeeper' - - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeFortigate' - - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeBarracudang' - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeSmoothwall' - - $ref: '#/components/schemas/ConfigFirewallMetaSubTypePaloalto' - - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeJsonrpc' - - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeCheckpoint' - - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeFamilyzone' - - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeLightspeedrocket' - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeWatchguard' + - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeLightspeedrocket' + - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeFamilyzone' + - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeBarracudang' + - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeCheckpoint' - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeIboss' + - $ref: '#/components/schemas/ConfigFirewallMetaSubTypeFortigate' ConfigFirewallMeta: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5685,6 +5685,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: 
'#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5699,6 +5705,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5715,6 +5723,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5731,6 +5745,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5747,6 +5763,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5761,6 +5783,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5777,6 +5801,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5791,6 +5821,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5809,6 +5841,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + 
sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5823,6 +5861,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5839,6 +5879,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5853,6 +5899,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5871,6 +5919,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5885,6 +5939,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5901,6 +5957,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5917,6 +5979,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5933,6 +5997,12 @@ components: 
$ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5949,6 +6019,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5965,6 +6037,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -5979,6 +6057,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -5995,6 +6075,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' transport: $ref: '#/components/schemas/Meta' type: @@ -6013,6 +6099,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: '#/components/schemas/Meta' cache_updates: @@ -6029,6 +6117,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -6043,6 +6137,8 @@ components: properties: meta: properties: + act_on_accounting_stop: + $ref: '#/components/schemas/Meta' cache_timeout: $ref: 
'#/components/schemas/Meta' cache_updates: @@ -6059,6 +6155,12 @@ components: $ref: '#/components/schemas/Meta' port: $ref: '#/components/schemas/Meta' + sso_on_access_reevaluation: + $ref: '#/components/schemas/Meta' + sso_on_accounting: + $ref: '#/components/schemas/Meta' + sso_on_dhcp: + $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' uid: @@ -6071,6 +6173,9 @@ components: type: object ConfigFirewallSubTypeBarracudang: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6104,6 +6209,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: BarracudaNG description: Discriminator `BarracudaNG` @@ -6131,6 +6245,9 @@ components: type: object ConfigFirewallSubTypeCheckpoint: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6164,6 +6281,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: Checkpoint description: Discriminator `Checkpoint` @@ -6187,6 +6313,9 @@ components: type: object ConfigFirewallSubTypeContentkeeper: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6220,6 +6349,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: ContentKeeper description: Discriminator `ContentKeeper` @@ -6243,6 +6381,9 @@ components: type: object ConfigFirewallSubTypeFamilyzone: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6279,6 +6420,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: FamilyZone description: Discriminator `FamilyZone` @@ -6301,6 +6451,9 @@ components: type: object ConfigFirewallSubTypeFortigate: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6334,6 +6487,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: FortiGate description: Discriminator `FortiGate` @@ -6357,6 +6519,9 @@ components: type: object ConfigFirewallSubTypeIboss: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6393,6 +6558,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: Iboss description: Discriminator `Iboss` @@ -6416,6 +6590,9 @@ components: type: object ConfigFirewallSubTypeJsonrpc: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6449,6 +6626,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: JSONRPC description: Discriminator `JSONRPC` @@ -6476,6 +6662,9 @@ components: type: object ConfigFirewallSubTypeJunipersrx: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6509,6 +6698,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: JuniperSRX description: Discriminator `JuniperSRX` @@ -6536,6 +6734,9 @@ components: type: object ConfigFirewallSubTypeLightspeedrocket: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6569,6 +6770,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: LightSpeedRocket description: Discriminator `LightSpeedRocket` @@ -6592,6 +6802,9 @@ components: type: object ConfigFirewallSubTypePaloalto: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6627,6 +6840,15 @@ components: description: If you use an alternative port, please specify. This parameter is ignored when the Syslog transport is selected. type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string transport: description: Transport type: string @@ -6656,6 +6878,9 @@ components: type: object ConfigFirewallSubTypeSmoothwall: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6689,6 +6914,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: SmoothWall description: Discriminator `SmoothWall` @@ -6712,6 +6946,9 @@ components: type: object ConfigFirewallSubTypeWatchguard: properties: + act_on_accounting_stop: + description: Act on accounting stop + type: string cache_timeout: description: Adjust the "Cache timeout" to half the expiration delay in your firewall.
Your DHCP renewal interval should match this value. @@ -6745,6 +6982,15 @@ components: port: description: If you use an alternative port, please specify type: integer + sso_on_access_reevaluation: + description: Sso on access reevaluation + type: string + sso_on_accounting: + description: Sso on accounting + type: string + sso_on_dhcp: + description: Sso on dhcp + type: string type: default: WatchGuard description: Discriminator `WatchGuard` @@ -6959,6 +7205,116 @@ components: $ref: '#/components/schemas/ConfigInterfaceGet' type: array type: object + ConfigKafka: + properties: + admin: + description: Admin + properties: + pass: + description: Password + type: string + user: + description: User + type: string + type: object + auths: + description: Auths + items: + description: Auth + properties: + pass: + description: Password + type: string + user: + description: User + type: string + type: object + type: array + cluster: + description: Cluster + items: + description: Cluster + properties: + name: + description: Name + type: string + value: + description: Value + type: string + type: object + type: array + host_configs: + description: Host configs + items: + description: Host config + properties: + config: + description: Config + items: + description: Config + properties: + name: + description: Name + type: string + value: + description: Value + type: string + type: object + type: array + host: + description: Host + type: string + type: object + type: array + iptables: + description: Iptables + properties: + clients: + description: Clients + items: + description: Client + type: string + type: array + cluster_ips: + description: Cluster ips + items: + description: Cluster ip + type: string + type: array + type: object + type: object + ConfigKafkaList: + allOf: + - $ref: '#/components/schemas/Iterable' + - properties: + items: + items: + $ref: '#/components/schemas/ConfigKafka' + type: array + type: object + ConfigKafkaMeta: + properties: + meta: + properties: + 
admin: + $ref: '#/components/schemas/Meta' + auths: + $ref: '#/components/schemas/Meta' + cluster: + $ref: '#/components/schemas/Meta' + host_configs: + $ref: '#/components/schemas/Meta' + iptables: + $ref: '#/components/schemas/Meta' + type: object + type: object + ConfigKafkaWrapped: + properties: + item: + $ref: '#/components/schemas/ConfigKafka' + status: + type: integer + type: object ConfigL2Network: properties: algorithm: @@ -7106,35 +7462,35 @@ components: ubiquiti_ap_mac_to_ip: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeUbiquitiApMacToIp' propertyName: type oneOf: - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeOption82Query' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePersonCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeIp6logCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeDnsAuditLogCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSwitchCacheLldplocalportDescription' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodesMaintenance' - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeCleanupChiDatabaseCache' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAuthLogCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeProvisioningCompliancePoll' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeClusterCheck' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSecurityEventMaintenance' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePkiCertificatesCheck' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctMaintenance' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeRadiusAuditLogCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePfflow' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFlushDnsAuditLog' - - $ref: 
'#/components/schemas/ConfigMaintenanceTaskMetaSubTypeBandwidthMaintenance' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePasswordOfTheDay' - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFingerbankDataUpdate' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePurgeBinaryLogs' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodesMaintenance' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeCertificatesCheck' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeLocationlogCleanup' - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAdminApiAuditLogCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCurrentSessionCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeClusterCheck' - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFlushRadiusAuditLog' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctCleanup' - - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePasswordOfTheDay' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeCertificatesCheck' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeIp6logCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePurgeBinaryLogs' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePersonCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeProvisioningCompliancePoll' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAuthLogCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeRadiusAuditLogCleanup' - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeIp4logCleanup' - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeUbiquitiApMacToIp' - - $ref: 
'#/components/schemas/ConfigMaintenanceTaskMetaSubTypeLocationlogCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctMaintenance' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeNodeCurrentSessionCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeBandwidthMaintenance' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeOption82Query' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSwitchCacheLldplocalportDescription' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeSecurityEventMaintenance' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeFlushDnsAuditLog' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeDnsAuditLogCleanup' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePkiCertificatesCheck' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypePfflow' + - $ref: '#/components/schemas/ConfigMaintenanceTaskMetaSubTypeAcctCleanup' ConfigMaintenanceTaskMetaSubTypeAcctCleanup: properties: meta: @@ -7541,12 +7897,20 @@ components: properties: meta: properties: + filter_events: + $ref: '#/components/schemas/Meta' group_id: $ref: '#/components/schemas/Meta' + heuristics: + $ref: '#/components/schemas/Meta' id: $ref: '#/components/schemas/Meta' kafka_brokers: $ref: '#/components/schemas/Meta' + kakfa_pass: + $ref: '#/components/schemas/Meta' + kakfa_user: + $ref: '#/components/schemas/Meta' read_topic: $ref: '#/components/schemas/Meta' schedule: @@ -7559,6 +7923,8 @@ components: $ref: '#/components/schemas/Meta' type: $ref: '#/components/schemas/Meta' + uuid: + $ref: '#/components/schemas/Meta' type: object type: object ConfigMaintenanceTaskMetaSubTypePkiCertificatesCheck: @@ -8536,15 +8902,27 @@ components: type: object ConfigMaintenanceTaskSubTypePfflow: properties: + filter_events: + description: Filter incoming events + type: string group_id: description: 'The Kafka Consumer Group ID ' type: string + 
heuristics: + description: Heuristics + type: string id: description: Pfcron Name type: string kafka_brokers: description: Kafka Brokers type: string + kakfa_pass: + description: Kafka Password + type: string + kakfa_user: + description: Kafka Username + type: string read_topic: description: The Kafka topic to read pfflows from type: string @@ -8567,6 +8945,9 @@ components: default: pfflow description: Discriminator `pfflow` type: string + uuid: + description: 'UUID ' + type: string required: - id - type @@ -9441,37 +9822,37 @@ components: URL: '#/components/schemas/ConfigPortalModuleMetaSubTypeUrl' propertyName: type oneOf: - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeProvisioning' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeSelectrole' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthFacebook' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeChained' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationNull' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeSslInspection' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGithub' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeMessage' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeUrl' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthOpenid' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationEmail' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationChoice' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthLinkedin' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeSurvey' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationPassword' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGoogle' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeChoice' - - $ref: 
'#/components/schemas/ConfigPortalModuleMetaSubTypeShowlocalaccount' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeFixedrole' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauth' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeRootsso' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSponsor' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSms' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeMfa' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeRootsso' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeShowlocalaccount' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthLinkedin' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeUrl' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGithub' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeSelectrole' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSaml' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBlackhole' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeRoot' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationEmail' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeChoice' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationPassword' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBilling' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthOpenid' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeMessage' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthGoogle' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauth' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeSslInspection' + - $ref: 
'#/components/schemas/ConfigPortalModuleMetaSubTypeFixedrole' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationLogin' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationSms' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBlackhole' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthentication' - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationOauthWindowslive' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthenticationBilling' - - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeChained' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeAuthentication' + - $ref: '#/components/schemas/ConfigPortalModuleMetaSubTypeProvisioning' ConfigPortalModuleMetaSubTypeAuthentication: properties: meta: @@ -11773,20 +12154,20 @@ components: windows: '#/components/schemas/ConfigProvisioningMetaSubTypeWindows' propertyName: type oneOf: - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeAccept' - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeMobileconfig' - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeDeny' - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeSentinelone' - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeWindows' - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeGoogleWorkspaceChromebook' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeMobileconfig' - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeAirwatch' - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeJamf' - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeIntune' - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeMobileiron' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeKandji' - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeAndroid' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeGoogleWorkspaceChromebook' - 
$ref: '#/components/schemas/ConfigProvisioningMetaSubTypeJamfcloud' - - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeKandji' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeSentinelone' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeAccept' - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeDpsk' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeMobileiron' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeJamf' + - $ref: '#/components/schemas/ConfigProvisioningMetaSubTypeIntune' ConfigProvisioningMetaSubTypeAccept: properties: meta: @@ -15265,37 +15646,37 @@ components: propertyName: type oneOf: - $ref: '#/components/schemas/ConfigSourceMetaSubTypeOpenid' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeWindowslive' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeStripe' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeAd' - $ref: '#/components/schemas/ConfigSourceMetaSubTypeBlackhole' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeAzuread' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeWindowslive' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeKickbox' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSms' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSaml' - $ref: '#/components/schemas/ConfigSourceMetaSubTypeHtpasswd' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeHttp' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEdir' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEmail' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSponsoremail' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSql' - $ref: '#/components/schemas/ConfigSourceMetaSubTypeNull' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeAd' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSaml' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeAzuread' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeHttp' - $ref: 
'#/components/schemas/ConfigSourceMetaSubTypeAdminproxy' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeAuthorization' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeRadius' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEmail' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEduroam' - $ref: '#/components/schemas/ConfigSourceMetaSubTypePotd' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeFacebook' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeRadius' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeGithub' - $ref: '#/components/schemas/ConfigSourceMetaSubTypeGoogle' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeLdap' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeGoogleworkspaceldap' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSql' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeStripe' - $ref: '#/components/schemas/ConfigSourceMetaSubTypePaypal' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeKickbox' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSms' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeLinkedin' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeTwilio' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeGithub' - $ref: '#/components/schemas/ConfigSourceMetaSubTypeKerberos' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEaptls' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeAuthorization' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEdir' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeFacebook' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeLdap' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeTwilio' - $ref: '#/components/schemas/ConfigSourceMetaSubTypeClickatell' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeSponsoremail' - - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEduroam' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeEaptls' + - $ref: 
'#/components/schemas/ConfigSourceMetaSubTypeLinkedin' + - $ref: '#/components/schemas/ConfigSourceMetaSubTypeGoogleworkspaceldap' ConfigSourceMetaSubTypeAd: properties: meta: @@ -22274,8 +22655,8 @@ components: server: '#/components/schemas/ConfigSyslogForwarderMetaSubTypeServer' propertyName: type oneOf: - - $ref: '#/components/schemas/ConfigSyslogForwarderMetaSubTypeServer' - $ref: '#/components/schemas/ConfigSyslogForwarderMetaSubTypeFile' + - $ref: '#/components/schemas/ConfigSyslogForwarderMetaSubTypeServer' ConfigSyslogForwarderMetaSubTypeFile: properties: meta: @@ -36522,6 +36903,110 @@ paths: $ref: '#/components/responses/UnprocessableEntity' tags: - Config/Interfaces + /api/v1/config/kafka: + description: |- + pf::UnifiedApi::Controller::Config::Kafka + + field_meta + Get a field's meta data + + field_type + Find the field type + + field_is_required + Check if the field is required + + field_placeholder + Get the placeholder for the field + + field_resource_placeholder + The place holder for the field + + field_meta_array_items + Get the meta for the items of the array + + field_allowed + The allowed fields + + field_default + Get the default value of a field + + field_extra_meta + Get the extra meta data for a field + + field_meta_object_properties + Get the properties of a field + + field_text_meta + Update text field meta data + + field_allowed_lookup + field_allowed_lookup + + map_options + map_options + + map_option + map_option + + format_form_errors + format_form_errors + get: + description: Get an item. + operationId: api.v1.Config.Kafka.get + parameters: [] + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ConfigKafkaWrapped' + description: Request successful. Response contains a specific resource. + '401': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + tags: + - Config/Kafka + options: + description: Get meta for a new item. 
+ operationId: api.v1.Config.Kafka.options + parameters: [] + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ConfigKafkaMeta' + description: Request successful. Response contains meta for a resource. + '401': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + tags: + - Config/Kafka + patch: + description: Update an item. + operationId: api.v1.Config.Kafka.update + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ConfigKafka' + responses: + '201': + $ref: '#/components/responses/Updated' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '422': + $ref: '#/components/responses/UnprocessableEntity' + tags: + - Config/Kafka /api/v1/config/l2_network/{network_id}: description: pf::UnifiedApi::Controller::Config::L2Networks get: diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Kafka.pm b/html/pfappserver/lib/pfappserver/Form/Config/Kafka.pm new file mode 100644 index 000000000000..d0411eccf530 --- /dev/null +++ b/html/pfappserver/lib/pfappserver/Form/Config/Kafka.pm @@ -0,0 +1,106 @@ +package pfappserver::Form::Config::Kafka; + +=head1 NAME + +pfappserver::Form::Config::Kafka - + +=head1 DESCRIPTION + +pfappserver::Form::Config::Kafka + +=cut + +use strict; +use warnings; +use HTML::FormHandler::Moose; +extends 'pfappserver::Base::Form'; +with 'pfappserver::Base::Form::Role::Help'; + +has_field 'iptables' => ( + type => 'Compound', +); + +has_field 'iptables.clients' => ( + type => 'Repeatable', +); + +has_field 'iptables.clients.contains' => ( + type => 'IPAddress', +); + +has_field 'iptables.cluster_ips' => ( + type => 'Repeatable', +); + +has_field 'iptables.cluster_ips.contains' => ( + type => 'IPAddress', +); + +has_field 'admin' => ( + type => 'UserPass', +); + +has_field 'auths' => ( + type => 'Repeatable', +); 
+ +has_field 'auths.contains' => ( + type => 'UserPass', +); + +has_field 'host_configs' => ( + type => 'Repeatable', +); + +has_field 'host_configs.contains' => ( + type => 'Compound', +); + +has_field 'host_configs.contains.config' => ( + type => 'Repeatable', +); + +has_field 'host_configs.contains.config.contains' => ( + type => 'NameVal', +); + +has_field 'host_configs.contains.host' => ( + type => 'Text', +); + +has_field 'cluster' => ( + type => 'Repeatable', +); + +has_field 'cluster.contains' => ( + type => 'NameVal', +); + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. 
+ +=cut + +1; diff --git a/html/pfappserver/lib/pfappserver/Form/Config/Pfcron/pfflow.pm b/html/pfappserver/lib/pfappserver/Form/Config/Pfcron/pfflow.pm index 50a4a7c11798..9a427916d357 100644 --- a/html/pfappserver/lib/pfappserver/Form/Config/Pfcron/pfflow.pm +++ b/html/pfappserver/lib/pfappserver/Form/Config/Pfcron/pfflow.pm @@ -51,6 +51,46 @@ has_field 'submit_batch' => ( help => \&batch_help_text }, ); +has_field 'uuid' => ( + type => 'Text', + default_method => \&default_field_method, + tags => { help => 'UUID ' }, +); + +has_field 'kakfa_user' => ( + type => 'Text', + default_method => \&default_field_method, + tags => { help => 'Kafka Username' }, +); + +has_field 'kakfa_pass' => ( + type => 'Text', + default_method => \&default_field_method, + tags => { help => 'Kafka Password' }, +); + +has_field 'filter_events' => ( + type => 'Toggle', + checked_value => '1', + unchecked_value => '0', + default_method => \&default_field_method, + tags => { + after_element => \&help, + help => "Filter incoming events", + }, +); + +has_field 'heuristics' => ( + type => 'Toggle', + checked_value => '1', + unchecked_value => '0', + default_method => \&default_field_method, + tags => { + after_element => \&help, + help => "Heuristics", + }, +); + =head1 AUTHOR Inverse inc. 
diff --git a/html/pfappserver/lib/pfappserver/Form/Field/NameVal.pm b/html/pfappserver/lib/pfappserver/Form/Field/NameVal.pm new file mode 100644 index 000000000000..a2d56bf0119b --- /dev/null +++ b/html/pfappserver/lib/pfappserver/Form/Field/NameVal.pm @@ -0,0 +1,63 @@ +package pfappserver::Form::Field::NameVal; + +=head1 NAME + +pfappserver::Form::Field::NameVal - + +=head1 DESCRIPTION + +pfappserver::Form::Field::NameVal + +=cut + +use strict; +use warnings; +use HTML::FormHandler::Moose; +extends 'HTML::FormHandler::Field::Compound'; +with 'pfappserver::Base::Form::Role::Help'; + +=head2 name + +Name + +=cut + +has_field 'name' => ( + type => 'Text', + label => 'Name', +); + +has_field 'value' => ( + type => 'Text', + label => 'Value', +); + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. 
+ +=cut + +1; + diff --git a/html/pfappserver/lib/pfappserver/Form/Field/UserPass.pm b/html/pfappserver/lib/pfappserver/Form/Field/UserPass.pm new file mode 100644 index 000000000000..e8a542793f53 --- /dev/null +++ b/html/pfappserver/lib/pfappserver/Form/Field/UserPass.pm @@ -0,0 +1,59 @@ +package pfappserver::Form::Field::UserPass; + +=head1 NAME + +pfappserver::Form::Field::UserPass - + +=head1 DESCRIPTION + +pfappserver::Form::Field::UserPass + +=cut + +use strict; +use warnings; +use strict; +use warnings; +use HTML::FormHandler::Moose; +extends 'HTML::FormHandler::Field::Compound'; +with 'pfappserver::Base::Form::Role::Help'; + +has_field 'user' => ( + type => 'Text', + label => 'User', +); + +has_field 'pass' => ( + type => 'ObfuscatedText', + label => 'Password', +); + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut + +1; + diff --git a/html/pfappserver/root/src/components/new/BaseFormGroupArray.vue b/html/pfappserver/root/src/components/new/BaseFormGroupArray.vue index b82bcab1ae61..8a6052730993 100644 --- a/html/pfappserver/root/src/components/new/BaseFormGroupArray.vue +++ b/html/pfappserver/root/src/components/new/BaseFormGroupArray.vue @@ -30,8 +30,7 @@ >{{ buttonLabel || $t('Add') }}
+ class="base-form-group-array-items"> @@ -42,10 +41,10 @@ 'is-lastchild': index === inputValue.length - 1 }" > - + {{ index + 1 }} - + { // yup throws an exception when a path is not defined in the schema // https://github.com/jquense/yup/issues/599 try { - validationPromise = schema.validateAt(path.value, form.value, { recursive, abortEarly: !recursive }) + //validationPromise = schema.validateAt(path.value, form.value, { recursive, abortEarly: !recursive }) + const schemaAt = yup.reach(schema, path.value) + const formAt = (function(p) { return p.split('.').reduce((a, v) => a[v], form.value) })(namespace.value) + validationPromise = schemaAt.validate(formAt, { recursive, abortEarly: !recursive }) } catch (e) { // path not defined in schema validationPromise = true } diff --git a/html/pfappserver/root/src/views/Configuration/_router/index.js b/html/pfappserver/root/src/views/Configuration/_router/index.js index 0c239635fe40..fe80c055a128 100644 --- a/html/pfappserver/root/src/views/Configuration/_router/index.js +++ b/html/pfappserver/root/src/views/Configuration/_router/index.js @@ -29,6 +29,7 @@ import SyslogForwardersRoutes from '../syslogForwarders/_router' import WrixRoutes from '../wrix/_router' import PkiRoutes from '../pki/_router' import MfasRoutes from '../mfas/_router' +import FleetDMRoutes from '../fleetDM/_router' /* Advanced Access Configuration */ const AdvancedAccessConfigurationSection = () => import(/* webpackChunkName: "Configuration" */ '../_components/TheSectionAdvancedAccessConfiguration') @@ -59,12 +60,12 @@ import MonitRoutes from '../monit/_router' import ServicesRoutes from '../services/_router' import DatabaseRoutes from '../database/_router' import ActiveActiveRoutes from '../activeActive/_router' -import FleetDMRoutes from '../fleetDM/_router' import RadiusRoutes from '../radius/_router' import DnsRoutes from '../dns/_router' import AdminLoginRoutes from '../adminLogin/_router' import AdminRolesRoutes from '../adminRoles/_router' 
import ConnectorsRoutes from '../connectors/_router' +import KafkaRoutes from '../kafka/_router' import store from '@/store' import BasesStoreModule from '../bases/_store' @@ -131,6 +132,7 @@ const route = { ...WrixRoutes, ...PkiRoutes, ...MfasRoutes, + ...FleetDMRoutes, /** * Advanced Access Configuration @@ -178,13 +180,13 @@ const route = { ...ServicesRoutes, ...DatabaseRoutes, ...ActiveActiveRoutes, - ...FleetDMRoutes, ...RadiusRoutes, ...DnsRoutes, ...AdminLoginRoutes, ...AdminRolesRoutes, ...SslCertificatesRoutes, - ...ConnectorsRoutes + ...ConnectorsRoutes, + ...KafkaRoutes, ] } diff --git a/html/pfappserver/root/src/views/Configuration/index.vue b/html/pfappserver/root/src/views/Configuration/index.vue index 519f3d667cd3..64fca5117794 100644 --- a/html/pfappserver/root/src/views/Configuration/index.vue +++ b/html/pfappserver/root/src/views/Configuration/index.vue @@ -164,11 +164,13 @@ const setup = () => { { name: i18n.t('OCSP Profiles'), path: '/configuration/radius/ocsp' } ] }, - { name: i18n.t('DNS Configuration'), path: '/configuration/dns' }, { name: i18n.t('Admin Access'), path: '/configuration/admin_roles' }, { name: i18n.t('Admin Login'), path: '/configuration/admin_login' }, + { name: i18n.t('Connectors'), path: '/configuration/connectors' }, + { name: i18n.t('DNS Configuration'), path: '/configuration/dns' }, + { name: i18n.t('FleetDM'), path: '/configuration/fleetdm', class: 'no-saas'}, + { name: i18n.t('Kafka'), path: '/configuration/kafka' }, { name: i18n.t('SSL Certificates'), path: '/configuration/certificates' }, - { name: i18n.t('Connectors'), path: '/configuration/connectors' } ] } ])) diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_api.js b/html/pfappserver/root/src/views/Configuration/kafka/_api.js new file mode 100644 index 000000000000..930dab74a6a6 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_api.js @@ -0,0 +1,20 @@ +import apiCall from '@/utils/api' + +export default { + item: () => { + 
return apiCall.get(['config', 'kafka']).then(response => { + return response.data.item + }) + }, + itemOptions: () => { + return apiCall.options(['config', 'kafka']).then(response => { + return response.data + }) + }, + update: data => { + const patch = data.quiet ? 'patchQuiet' : 'patch' + return apiCall[patch](['config', 'kafka'], data).then(response => { + return response.data + }) + } +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseAuth.vue b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseAuth.vue new file mode 100644 index 000000000000..375dbcda5c3b --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseAuth.vue @@ -0,0 +1,117 @@ + + diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseClusterConfig.vue b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseClusterConfig.vue new file mode 100644 index 000000000000..08794b342a81 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseClusterConfig.vue @@ -0,0 +1,115 @@ + + + diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupAuths.js b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupAuths.js new file mode 100644 index 000000000000..408ae4358407 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupAuths.js @@ -0,0 +1,35 @@ +import { BaseFormGroupArray, BaseFormGroupArrayProps } from '@/components/new' +import BaseAuth from './BaseAuth' +import i18n from '@/utils/locale' + +export const props = { + ...BaseFormGroupArrayProps, + + buttonLabel: { + type: String, + default: i18n.t('Add Auth') + }, + // overload :showIndex + showIndex: false, + + // overload :childComponent + childComponent: { + type: Object, + default: () => BaseAuth + }, + + // overload :defaultItem + defaultItem: { + type: Object, + default: () => ({ + user: null, + pass: 
null + }) + } +} + +export default { + name: 'base-form-group-auths', + extends: BaseFormGroupArray, + props +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupClusterConfig.js b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupClusterConfig.js new file mode 100644 index 000000000000..60d574034ce3 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupClusterConfig.js @@ -0,0 +1,35 @@ +import { BaseFormGroupArray, BaseFormGroupArrayProps } from '@/components/new' +import BaseClusterConfig from './BaseClusterConfig' +import i18n from '@/utils/locale' + +export const props = { + ...BaseFormGroupArrayProps, + + buttonLabel: { + type: String, + default: i18n.t('Add Cluster Config') + }, + // overload :showIndex + showIndex: false, + + // overload :childComponent + childComponent: { + type: Object, + default: () => BaseClusterConfig + }, + + // overload :defaultItem + defaultItem: { + type: Object, + default: () => ({ + name: null, + value: null + }) + } +} + +export default { + name: 'base-form-group-cluster-config', + extends: BaseFormGroupArray, + props +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfig.js b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfig.js new file mode 100644 index 000000000000..3c36f4f74ead --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfig.js @@ -0,0 +1,35 @@ +import { BaseFormGroupArray, BaseFormGroupArrayProps } from '@/components/new' +import BaseHostConfigConfig from './BaseHostConfigConfig' +import i18n from '@/utils/locale' + +export const props = { + ...BaseFormGroupArrayProps, + + buttonLabel: { + type: String, + default: i18n.t('Add Config') + }, + // overload :showIndex + showIndex: false, + + // overload :childComponent + childComponent: { + type: Object, + default: () => 
BaseHostConfigConfig + }, + + // overload :defaultItem + defaultItem: { + type: Object, + default: () => ({ + name: null, + value: null + }) + } +} + +export default { + name: 'base-form-group-host-config', + extends: BaseFormGroupArray, + props +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfigs.js b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfigs.js new file mode 100644 index 000000000000..6ce3be0e8dbd --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupHostConfigs.js @@ -0,0 +1,35 @@ +import { BaseFormGroupArray, BaseFormGroupArrayProps } from '@/components/new' +import BaseHostConfig from './BaseHostConfig' +import i18n from '@/utils/locale' + +export const props = { + ...BaseFormGroupArrayProps, + + buttonLabel: { + type: String, + default: i18n.t('Add Host Config') + }, + // overload :showIndex + showIndex: false, + + // overload :childComponent + childComponent: { + type: Object, + default: () => BaseHostConfig + }, + + // overload :defaultItem + defaultItem: { + type: Object, + default: () => ({ + name: null, + value: null + }) + } +} + +export default { + name: 'base-form-group-host-configs', + extends: BaseFormGroupArray, + props +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupIptables.js b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupIptables.js new file mode 100644 index 000000000000..11c3a3537949 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseFormGroupIptables.js @@ -0,0 +1,28 @@ +import { BaseFormGroupArray, BaseFormGroupArrayProps } from '@/components/new' +import { BaseInput } from '@/components/new/' +import i18n from '@/utils/locale' + +export const props = { + ...BaseFormGroupArrayProps, + + buttonLabel: { + type: String, + default: i18n.t('Add IPv4') + }, + // overload :childComponent + 
childComponent: { + type: Object, + default: () => BaseInput + }, + // overload :defaultItem + defaultItem: { + type: String, + default: () => (null) + } +} + +export default { + name: 'base-form-group-iptables', + extends: BaseFormGroupArray, + props +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfig.vue b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfig.vue new file mode 100644 index 000000000000..024675ac7042 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfig.vue @@ -0,0 +1,117 @@ + + diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfigConfig.vue b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfigConfig.vue new file mode 100644 index 000000000000..8b138eb9f275 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/BaseHostConfigConfig.vue @@ -0,0 +1,115 @@ + + + diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/TheForm.vue b/html/pfappserver/root/src/views/Configuration/kafka/_components/TheForm.vue new file mode 100644 index 000000000000..c26b85207f64 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/TheForm.vue @@ -0,0 +1,95 @@ + + + diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/TheView.js b/html/pfappserver/root/src/views/Configuration/kafka/_components/TheView.js new file mode 100644 index 000000000000..4b2bb2b13c42 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/TheView.js @@ -0,0 +1,27 @@ +import { + BaseView, + + FormButtonBar, + TheForm +} from './' + +const components = { + FormButtonBar, + TheForm +} + +import { useViewResource, useViewResourceProps as props } from '../../_composables/useViewResource' + +import * as resource from '../_composables/useResource' +const setup = (props, context) => 
useViewResource(resource, props, context) + +// @vue/component +export default { + name: 'the-view', + extends: BaseView, + inheritAttrs: false, + components, + props, + setup +} + diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_components/index.js b/html/pfappserver/root/src/views/Configuration/kafka/_components/index.js new file mode 100644 index 000000000000..c9c64c2a5491 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_components/index.js @@ -0,0 +1,27 @@ +import {BaseViewResource} from '../../_components/new/' +import { + BaseFormButtonBar, + BaseFormGroupInput, + BaseFormGroupInputPassword, +} from '@/components/new/' +import BaseFormGroupClusterConfig from './BaseFormGroupClusterConfig' +import BaseFormGroupHostConfigs from './BaseFormGroupHostConfigs' +import BaseFormGroupAuths from './BaseFormGroupAuths' +import BaseFormGroupIptables from './BaseFormGroupIptables' +import TheForm from './TheForm' +import TheView from './TheView' + +export { + BaseFormButtonBar as FormButtonBar, + + BaseFormGroupInputPassword as FormGroupAdminPass, + BaseFormGroupInput as FormGroupAdminUser, + BaseFormGroupClusterConfig as FormGroupClusterConfig, + BaseFormGroupHostConfigs as FormGroupHostConfigs, + BaseFormGroupAuths as FormGroupAuths, + BaseFormGroupIptables as FormGroupIptables, + + BaseViewResource as BaseView, + TheForm, + TheView +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_composables/useResource.js b/html/pfappserver/root/src/views/Configuration/kafka/_composables/useResource.js new file mode 100644 index 000000000000..cdd26532d70d --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_composables/useResource.js @@ -0,0 +1,23 @@ +import { computed } from '@vue/composition-api' +import i18n from '@/utils/locale' + +export const useTitle = () => i18n.t('Kafka') + +export const useServices = () => computed(() => { + return { + message: i18n.t('Modifying this configuration requires a 
restart of kafka.'), + services: ['kafka'], + system_services: [], + k8s_services: [], + systemd: false + } +}) + +export const useStore = $store => { + return { + isLoading: computed(() => $store.getters['$_kafka/isLoading']), + getItem: () => $store.dispatch('$_kafka/getKafka'), + getItemOptions: () => $store.dispatch('$_kafka/optionsKafka'), + updateItem: params => $store.dispatch('$_kafka/updateKafka', params) + } +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_router.js b/html/pfappserver/root/src/views/Configuration/kafka/_router.js new file mode 100644 index 000000000000..535293786e11 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_router.js @@ -0,0 +1,25 @@ +import store from '@/store' +import BasesStoreModule from './_store' + +const TheView = () => import(/* webpackChunkName: "Configuration" */ './_components/TheView') + +export const beforeEnter = (to, from, next = () => {}) => { + if (!store.state.$_kafka) { + store.registerModule('$_kafka', BasesStoreModule) + } + next() +} + +const can = () => !store.getters['system/isSaas'] + +export default [ + { + path: 'kafka', + name: 'kafka', + component: TheView, + meta: { + can + }, + beforeEnter + } +] diff --git a/html/pfappserver/root/src/views/Configuration/kafka/_store.js b/html/pfappserver/root/src/views/Configuration/kafka/_store.js new file mode 100644 index 000000000000..08c59fe867e5 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/_store.js @@ -0,0 +1,94 @@ +/** +* "$_kafka" store module +*/ +import Vue from 'vue' +import api from './_api' +import { types } from '@/store' + +// Default values +const state = () => { + return { + cache: {}, // items details + message: '', + itemStatus: '' + } +} + +const getters = { + isWaiting: state => [types.LOADING, types.DELETING].includes(state.itemStatus), + isLoading: state => state.itemStatus === types.LOADING +} + +const actions = { + all: () => { + const params = { + sort: 'id', + fields: 
['id'].join(',') + } + return api.bases(params).then(response => { + return response.items + }) + }, + getKafka: ({ state, commit }) => { + if (state.cache['kafka']) { + return Promise.resolve(state.cache['kafka']).then(cache => JSON.parse(JSON.stringify(cache))) + } + commit('ITEM_REQUEST') + return api.item().then(item => { + commit('ITEM_REPLACED', item) + return JSON.parse(JSON.stringify(item)) + }).catch((err) => { + commit('ITEM_ERROR', err.response) + throw err + }) + }, + optionsKafka: ({ commit }) => { + commit('ITEM_REQUEST') + return api.itemOptions().then(response => { + commit('ITEM_SUCCESS') + return response + }).catch((err) => { + commit('ITEM_ERROR', err.response) + throw err + }) + }, + updateKafka: ({ commit }, data) => { + commit('ITEM_REQUEST') + data.id = 'kafka' + return api.update(data).then(response => { + commit('ITEM_REPLACED', data) + return response + }).catch(err => { + commit('ITEM_ERROR', err.response) + throw err + }) + } +} + +const mutations = { + ITEM_REQUEST: (state, type) => { + state.itemStatus = type || types.LOADING + state.message = '' + }, + ITEM_REPLACED: (state, data) => { + state.itemStatus = types.SUCCESS + Vue.set(state.cache, data.id, JSON.parse(JSON.stringify(data))) + }, + ITEM_ERROR: (state, response) => { + state.itemStatus = types.ERROR + if (response && response.data) { + state.message = response.data.message + } + }, + ITEM_SUCCESS: (state) => { + state.itemStatus = types.SUCCESS + } +} + +export default { + namespaced: true, + state, + getters, + actions, + mutations +} diff --git a/html/pfappserver/root/src/views/Configuration/kafka/config.js b/html/pfappserver/root/src/views/Configuration/kafka/config.js new file mode 100644 index 000000000000..bed7d68cd602 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/config.js @@ -0,0 +1,34 @@ +import { pfFieldType as fieldType } from '@/globals/pfField' + +export const commonKeys = { + KAFKA_CONTROLLER_LISTENER_NAMES: [fieldType.SUBSTRING], + 
KAFKA_CONTROLLER_QUORUM_VOTERS: [fieldType.SUBSTRING], + KAFKA_INTER_BROKER_LISTENER_NAME: [fieldType.SUBSTRING], + KAFKA_LISTENERS: [fieldType.SUBSTRING], + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: [fieldType.SUBSTRING], + KAFKA_LOG_DIRS: [fieldType.SUBSTRING], + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: [fieldType.SUBSTRING], + KAFKA_OPTS: [fieldType.SUBSTRING], + KAFKA_PROCESS_ROLES: [fieldType.SUBSTRING], + KAFKA_SASL_ENABLED_MECHANISMS: [fieldType.SUBSTRING] +} + +export const clusterKeys = { + CLUSTER_ID: [fieldType.SUBSTRING], + ...commonKeys +} + +export const clusterFields = Object.entries(clusterKeys).reduce((fields, [key, types]) => { + return { ...fields, [key]: { value: key, text: key, types } } +}, {}) + +export const configKeys = { + KAFKA_NODE_ID: [fieldType.INTEGER], + KAFKA_ADVERTISED_LISTENERS: [fieldType.SUBSTRING], + ...commonKeys +} + +export const configFields = Object.entries(configKeys).reduce((fields, [key, types]) => { + return { ...fields, [key]: { value: key, text: key, types } } +}, {}) + diff --git a/html/pfappserver/root/src/views/Configuration/kafka/schema.js b/html/pfappserver/root/src/views/Configuration/kafka/schema.js new file mode 100644 index 000000000000..2f5b72c41446 --- /dev/null +++ b/html/pfappserver/root/src/views/Configuration/kafka/schema.js @@ -0,0 +1,52 @@ +import i18n from '@/utils/locale' +import yup from '@/utils/yup' + +const schemaAuth = yup.object({ + user: yup.string().nullable().required().label(i18n.t('Username')), + pass: yup.string().nullable().required().label(i18n.t('Password')), +}) + +const schemaAuths = yup.array().ensure().unique(i18n.t('Duplicate username'), ({ name }) => name).of(schemaAuth) + +const schemaCluster = yup.object({ + name: yup.string().nullable().required().label(i18n.t('Name')), + value: yup.string().nullable().required().label(i18n.t('Value')), +}) + +const schemaClusters = yup.array().ensure().unique(i18n.t('Duplicate key'), ({ name }) => name).of(schemaCluster) + +const 
schemaHostConfigConfigItem = yup.object({ + name: yup.string().nullable().required().label(i18n.t('Key')), + value: yup.string().nullable().required().label(i18n.t('Value')) +}) + +const schemaHostConfigConfig = yup.array().ensure().of(schemaHostConfigConfigItem) + +const schemaHostConfig = yup.object({ + host: yup.string().nullable().required().label(i18n.t('Host')) + .isCommonNameOrFQDN(i18n.t('Invalid Hostname.')), + config: yup.array().ensure().of(schemaHostConfigConfig) +}) + +const schemaHostConfigs = yup.array().ensure().of(schemaHostConfig) + +const schemaIpv4 = yup.string().nullable().required().label(i18n.t('IPv4')) + .isIpv4() + +const schemaIpv4s = yup.array().ensure().of(schemaIpv4) + +const schemaIptables = yup.object({ + clients: schemaIpv4s, + cluster_ips: schemaIpv4s, +}) + +export const schema = () => yup.object({ + admin: schemaAuth, + auths: schemaAuths, + cluster: schemaClusters, + host_configs: schemaHostConfigs, + iptables: schemaIptables, +}) + +export default schema + diff --git a/html/pfappserver/root/src/views/Configuration/maintenanceTasks/_components/TheForm.vue b/html/pfappserver/root/src/views/Configuration/maintenanceTasks/_components/TheForm.vue index 643bc17d09d9..beae7f621041 100644 --- a/html/pfappserver/root/src/views/Configuration/maintenanceTasks/_components/TheForm.vue +++ b/html/pfappserver/root/src/views/Configuration/maintenanceTasks/_components/TheForm.vue @@ -146,6 +146,65 @@ enabled-value="enabled" disabled-value="disabled" /> + + + + + + + + + + + + + + + + + + + From d02e081e5c09d6824000d30a1c8da3eee8c599b1 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Thu, 5 Dec 2024 19:21:57 +0000 Subject: [PATCH 174/176] update NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index b9635f7205c8..e769320b0a00 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -60,6 +60,7 @@ For a list of compatibility related changes see the < Date: Thu, 5 Dec 2024 16:52:10 -0500 Subject: 
[PATCH 175/176] feature/config-crypt (#8406) * Add pf::config::crypt * PfCrypt in go * Remove salt and sync perl and golang encrypt/decrypt * Manage the system_init_key * Use system_key_init for key creation * load the systemInitKey * remove pfconfigdriver.UnifiedApiSystemUser * Add more tests * load system_init_key from the env * Create a wrapper for encrypt strings * Make the LDAP password encrypted * Add test file * tool for encrypting/decrypting data * Add tool for encrypting/decrypting * Add encrypted password * Allow encrypted to be saved in pfconfig * Only compute the key once * Save any ObfuscatedText as encrypted * Taken care of in a higher place * Only apply the FREEZE method in pfconfig * Add func starts_with * Add tests * update ends_with with a faster version * Use a different object for thawing * Add util-linux * add bin/pfcrypt * Add usage output * fix config * Add system_init_key to Makefile and git-storage scripts * Allow to use a different key for encryption/decryption * Move the all_stores logic to pf::ConfigStore::All * Update SYSTEM_INIT_KEY file * upgrade script * if syntax setval * Fix issue with saving * Add pfcrypt to all rule * Fix secret in go * Make Pass a pfcrypt.CryptString * change where the /usr/local/pf/conf/system_init_key is created * Fix crypt * just copy sbin for debian * Split copy to two different rules * Fix DestDir --- .gitignore | 4 +- Makefile | 6 +- addons/change_system_init_key.pl | 126 ++++++++++++++ addons/dev-helpers/create-pf-git-storage.sh | 1 + addons/upgrade/to-14.1-crypt-at-rest.pl | 133 +++++++++++++++ ci/lib/build/generate-material.sh | 1 + config.mk | 1 + debian/packetfence-config.postinst | 5 + debian/rules | 3 +- go/.gitignore | 2 + go/Makefile | 13 +- go/chisel/server/server_handler.go | 2 +- go/cmd/pfcrypt/main.go | 46 +++++ go/cmd/pfdhcp/main.go | 2 +- go/cmd/pfstats/main.go | 2 +- .../ldapSearchClient/LdapSearchClient.go | 2 +- go/config/pfcrypt/init.go | 35 ++++ go/config/pfcrypt/pfcrypt.go | 159 
++++++++++++++++++ go/config/pfcrypt/pfcrypt_test.go | 60 +++++++ go/config/pfcrypt/string.go | 49 ++++++ go/config/pfcrypt/string_test.go | 44 +++++ go/db/db.go | 4 +- go/file_paths/paths.go | 2 + go/firewallsso/barracudang.go | 9 +- go/firewallsso/checkpoint.go | 7 +- go/firewallsso/contentkeeper.go | 5 +- go/firewallsso/familyzone.go | 9 +- go/firewallsso/fortigate.go | 5 +- go/firewallsso/iboss.go | 7 +- go/firewallsso/jsonrpc.go | 9 +- go/firewallsso/junipersrx.go | 11 +- go/firewallsso/watchguard.go | 5 +- go/galeraautofix/mariadb/mariadb.go | 6 +- go/jsonrpc2/client.go | 32 ++-- go/pfconfigdriver/structs.go | 115 ++++++------- go/plugin/caddy2/api-aaa/api-aaa.go | 4 +- go/plugin/caddy2/api-aaa/api-aaa_test.go | 2 +- go/plugin/caddy2/pfpki/models/models.go | 2 +- .../pfappserver/Form/Field/ObfuscatedText.pm | 16 ++ lib/pf/ConfigStore/All.pm | 62 +++++++ lib/pf/Sereal.pm | 6 +- lib/pf/cluster.pm | 16 +- lib/pf/config/crypt.pm | 142 ++++++++++++++++ lib/pf/config/crypt/object.pm | 77 +++++++++ lib/pf/config/crypt/object/freeze.pm | 55 ++++++ lib/pf/config/crypt/string.pm | 59 +++++++ lib/pf/constants/cluster.pm | 2 + lib/pf/file_paths.pm | 3 + lib/pf/util.pm | 13 +- lib/pfconfig/backend/mysql.pm | 5 +- lib/pfconfig/cached.pm | 1 + lib/pfconfig/manager.pm | 42 ++++- .../namespaces/config/Authentication.pm | 2 + rpm/packetfence.spec | 7 + sbin/pfconfig | 6 +- t/data/authentication.conf | 14 ++ t/data/system_init_key | 1 + t/test_paths.pm | 1 + t/unittest/config/crypt/object/freeze.t | 76 +++++++++ t/util.t | 15 +- 60 files changed, 1402 insertions(+), 149 deletions(-) create mode 100755 addons/change_system_init_key.pl create mode 100755 addons/upgrade/to-14.1-crypt-at-rest.pl create mode 100644 go/cmd/pfcrypt/main.go create mode 100644 go/config/pfcrypt/init.go create mode 100644 go/config/pfcrypt/pfcrypt.go create mode 100644 go/config/pfcrypt/pfcrypt_test.go create mode 100644 go/config/pfcrypt/string.go create mode 100644 go/config/pfcrypt/string_test.go 
create mode 100644 lib/pf/ConfigStore/All.pm create mode 100644 lib/pf/config/crypt.pm create mode 100644 lib/pf/config/crypt/object.pm create mode 100644 lib/pf/config/crypt/object/freeze.pm create mode 100644 lib/pf/config/crypt/string.pm create mode 100644 t/data/system_init_key create mode 100755 t/unittest/config/crypt/object/freeze.t diff --git a/.gitignore b/.gitignore index 94140619ff63..e9fea42bfe31 100644 --- a/.gitignore +++ b/.gitignore @@ -51,8 +51,9 @@ conf/uploads conf/kafka.conf conf/config.toml db/upgrade-tenant-11.2-12.0.sql -bin/pfcmd bin/ntlm_auth_wrapper +bin/pfcmd +bin/pfcrypt src/mariadb_udf/*.o src/mariadb_udf/*.so src/mariadb_udf/test_pf_udf @@ -220,6 +221,7 @@ conf/templates/emails-guest_sponsor_preregistration.txt.tt conf/dns_filters.conf conf/mariadb/*.tt conf/unified_api_system_pass +conf/system_init_key html/pfappserver/pfappserver.conf html/captive-portal/captiveportal.conf nytprof/ diff --git a/Makefile b/Makefile index 20891e779f1b..d30f888a6a59 100644 --- a/Makefile +++ b/Makefile @@ -106,6 +106,9 @@ conf/local_secret: conf/unified_api_system_pass: date +%s | sha256sum | base64 | head -c 32 > conf/unified_api_system_pass +conf/system_init_key: + hexdump -e '/1 "%x"' < /dev/urandom | head -c 32 > /usr/local/pf/conf/system_init_key + bin/pfcmd: src/pfcmd.c $(CC) -O2 -g -std=c99 -Wall $< -o $@ @@ -185,7 +188,8 @@ systemd: pf-dal: perl /usr/local/pf/addons/dev-helpers/bin/generator-data-access-layer.pl -devel: configurations conf/ssl/server.key conf/ssl/server.crt conf/local_secret bin/pfcmd raddb/certs/server.crt sudo translation mysql-schema raddb/sites-enabled fingerbank chown_pf permissions bin/ntlm_auth_wrapper conf/unified_api_system_pass +devel: configurations conf/ssl/server.key conf/ssl/server.crt conf/local_secret bin/pfcmd raddb/certs/server.crt \ + sudo translation mysql-schema raddb/sites-enabled fingerbank chown_pf permissions bin/ntlm_auth_wrapper conf/unified_api_system_pass conf/system_init_key test: cd t && ./smoke.t 
diff --git a/addons/change_system_init_key.pl b/addons/change_system_init_key.pl new file mode 100755 index 000000000000..416f4b039b44 --- /dev/null +++ b/addons/change_system_init_key.pl @@ -0,0 +1,126 @@ +#!/usr/bin/perl + +=head1 NAME + +change_system_init_key - + +=head1 DESCRIPTION + +change_system_init_key + +=head1 SYNOPSIS + +change_system_init_key --new-key= --old-key= [FILES] + + --new-key The new key. Required + --old-key The old key. Default the contents of /usr/local/pf/conf/system_init_key or environmental variable PF_SYSTEM_INIT_KEY. + --dry-run Don't change just show what may happen. + --help Show help + +=cut + +use strict; +use warnings; +use lib qw(/usr/local/pf/lib); +use lib qw(/usr/local/pf/lib_perl/lib/perl5); +use pf::IniFiles; +use pf::config::crypt; +use pf::file_paths; +use File::Copy; +use pf::ConfigStore::All; +use Pod::Usage; +use Getopt::Long; +use pf::file_paths qw($system_init_key_file); + +my $new_key; +my $old_key = $pf::config::crypt::SYSTEM_INIT_KEY; +my $help; +my $no_update; +my $dry_run; +GetOptions ( + "new-key=s" => \$new_key, + "old-key=s" => \$old_key, + "dry-run!" 
=> \$dry_run, + "help|h" => \$help, +) or pod2usage(); + +if($help){ + pod2usage( -verbose => 1 ); +} + +if (!$old_key) { + $old_key = $pf::config::crypt::SYSTEM_INIT_KEY; +} + +sub change_key { + my ($old_key, $new_key, $file) = @_; + my $ini = pf::IniFiles->new(-file => $file, -allowempty => 1); + my $changed = 0; + foreach my $section ( $ini->Sections() ) { + for my $param ($ini->Parameters($section)) { + my $val = $ini->val($section, $param); + next if (rindex($val, $pf::config::crypt::PREFIX)); + my $data = pf::config::crypt::pf_decrypt_with_key($old_key, $val); + die "failed to decrypt $section.$param = $val\n" if !defined $data; + if ($dry_run) { + print "Would update $section.$param = $val\n"; + next; + } + + my $new_val = pf::config::crypt::pf_encrypt_with_key($new_key, $data); + print "$section.$param = $val => $new_val\n"; + $ini->setval($section, $param, $new_val); + $changed |= 1; + } + } + + if ($changed) { + copy($file, "${file}.bak"); + $ini->RewriteConfig(); + } + + return $changed; +} + +my $new_derived_key = pf::config::crypt::derived_key($new_key); +my $old_derived_key = pf::config::crypt::derived_key($old_key); +my $changed = 0; +for my $storeClass (@{pf::ConfigStore::All::all_stores() || []}) { + my $store = $storeClass->new; + my $file_path = $store->configFile; + next if !defined $file_path || !-e $file_path; + print "Updating ", $file_path, " with new key", "\n"; + $changed |= change_key($old_derived_key, $new_derived_key, $file_path); +} + +print "export PF_SYSTEM_INIT_KEY=$new_key\n"; +open(my $fh, ">", $system_init_key_file); +print $fh $new_key; +close($fh); + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut diff --git a/addons/dev-helpers/create-pf-git-storage.sh b/addons/dev-helpers/create-pf-git-storage.sh index 531647eebd11..4eded1a8327c 100644 --- a/addons/dev-helpers/create-pf-git-storage.sh +++ b/addons/dev-helpers/create-pf-git-storage.sh @@ -46,6 +46,7 @@ make conf/local_secret make raddb/certs/server.crt make raddb/sites-enabled make conf/unified_api_system_pass +make conf/system_init_key make configurations make translation diff --git a/addons/upgrade/to-14.1-crypt-at-rest.pl b/addons/upgrade/to-14.1-crypt-at-rest.pl new file mode 100755 index 000000000000..8abb1a6d2255 --- /dev/null +++ b/addons/upgrade/to-14.1-crypt-at-rest.pl @@ -0,0 +1,133 @@ +#!/usr/bin/perl + +=head1 NAME + +to-14.1-crypt-at-rest - + +=head1 DESCRIPTION + +to-14.1-crypt-at-rest + +=cut + +use strict; +use warnings; +use lib qw(/usr/local/pf/lib); +use lib qw(/usr/local/pf/lib_perl/lib/perl5); +use pf::UnifiedApi::Controller::Config; +use pf::config::crypt qw(); +use Module::Pluggable + 'search_path' => [qw(pf::UnifiedApi::Controller::Config)], + 'sub_name' => '_all_config', + 'require' => 1, + 'inner' => 0, + ; + +for my $name (__PACKAGE__->_all_config) { + if ($name eq "pf::UnifiedApi::Controller::Config::Subtype" || !$name->isa("pf::UnifiedApi::Controller::Config")) { + next; + } + + my $c = $name->new(); + if ($name->isa( "pf::UnifiedApi::Controller::Config::Subtype")) { + update_config_controller_with_subtype($c, $name); + } else { + my $formName = $c->form_class; + update_config_controller($c, $name, $formName, {}); + } + +} + +sub 
update_config_controller { + my ($c, $name, $formName, $item) = @_; + $c->stash({admin_roles => []}); + my $form = $c->form($item); + my %fields2Encrypt; + for my $field ($form->fields) { + my $fieldName = $field->name; + my $type = $field->type; + if ($type eq 'ObfuscatedText') { + $fields2Encrypt{$fieldName} = undef; + } + } + + my $cs = $c->config_store; + my $ini = $cs->cachedConfig; + my $changed = 0; + for my $section ($ini->Sections()) { + for my $param ($ini->Parameters($section)) { + next if (!exists $fields2Encrypt{$param}); + print "Changing $section.$param\n"; + my $val = $ini->val($section, $param); + $val = pf::config::crypt::pf_encrypt($val); + $ini->setval($section, $param, $val); + $changed |= 1; + } + } + if ($changed) { + $ini->RewriteConfig(); + } +} + +sub update_config_controller_with_subtype { + my ($c, $name) = @_; + $c->stash({admin_roles => []}); + my $typeLookup = $name->type_lookup(); + + my $cs = $c->config_store; + my $ini = $cs->cachedConfig; + my $changed = 0; + for my $section ($ini->Sections()) { + my $type = $ini->val($section, 'type'); + next if !$type; + my $form = $c->form({type => $type}); + my %fields2Encrypt; + for my $field ($form->fields) { + my $fieldName = $field->name; + my $type = $field->type; + if ($type eq 'ObfuscatedText') { + $fields2Encrypt{$fieldName} = undef; + } + } + for my $param ($ini->Parameters($section)) { + next if (!exists $fields2Encrypt{$param}); + my $val = $ini->val($section, $param); + print "Changing $section.$param\n"; + $val = pf::config::crypt::pf_encrypt($val); + $ini->setval($section, $param, $val); + $changed |= 1; + } + } + if ($changed) { + $ini->RewriteConfig(); + } +} + + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. 
+ +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut + diff --git a/ci/lib/build/generate-material.sh b/ci/lib/build/generate-material.sh index ea5d0fbf5073..b501f85536b1 100755 --- a/ci/lib/build/generate-material.sh +++ b/ci/lib/build/generate-material.sh @@ -37,6 +37,7 @@ generate_material() { echo "Make config files available to start pfconfig container" make -C ${PF_SRC_DIR} configurations make -C ${PF_SRC_DIR} conf/unified_api_system_pass + make -C ${PF_SRC_DIR} conf/system_init_key make -C ${PF_SRC_DIR} conf/local_secret mkdir -p ${PF_SRC_DIR}/result diff --git a/config.mk b/config.mk index 8987db4c7fef..ee3ad88e1509 100644 --- a/config.mk +++ b/config.mk @@ -54,6 +54,7 @@ LOCAL_REGISTRY = packetfence # GOVERSION = go1.23.1 PF_BINARIES = pfhttpd pfqueue-go pfdhcp pfdns pfstats pfdetect galera-autofix pfacct pfcron mysql-probe pfconnector sdnotify-proxy +PF_GO_CMDS = pfcrypt # # PF versions diff --git a/debian/packetfence-config.postinst b/debian/packetfence-config.postinst index 3cdac2f5b4e3..f2cb0b845f6d 100644 --- a/debian/packetfence-config.postinst +++ b/debian/packetfence-config.postinst @@ -28,6 +28,11 @@ case "$1" in else echo "pfconfig.conf already exists, won't touch it!" fi + + if [ ! 
-f /usr/local/pf/conf/system_init_key ]; then + hexdump -e '/1 "%x"' < /dev/urandom | head -c 32 > /usr/local/pf/conf/system_init_key + fi + /sbin/ldconfig if [ ${DIST} = "jessie" ] || [ ${DIST} = "stretch" ] || [ ${DIST} = "bullseye" ] || [ ${DIST} = "bookworm" ]; then systemctl enable packetfence-config diff --git a/debian/rules b/debian/rules index e7e0cb196ab3..b076f3336453 100755 --- a/debian/rules +++ b/debian/rules @@ -183,7 +183,8 @@ install: build # Golang binary install -d -m0744 $(CURDIR)/debian/packetfence-golang-daemon$(PREFIX)/$(NAME)/sbin make -C go all - make -C go DESTDIR=$(CURDIR)/debian/packetfence-golang-daemon copy + make -C go DESTDIR=$(CURDIR)/debian/packetfence-golang-daemon copy-sbin + make -C go DESTDIR=$(CURDIR)/debian/packetfence copy-bin # Build architecture-independent files here. diff --git a/go/.gitignore b/go/.gitignore index ce3148e3f5e0..10a30afac5ec 100644 --- a/go/.gitignore +++ b/go/.gitignore @@ -10,6 +10,7 @@ pfcron pfqueue-go galera-autofix mysql-probe +pfcrypt !pfhttpd/ !pfdns/ !pfdhcp/ @@ -20,6 +21,7 @@ mysql-probe !pfqueue-go/ !galera-autofix/ !mysql-probe/ +!pfcrypt/ Caddyfile acct-tester/db-tester/db-tester acct-tester/radius-tester/radius-tester diff --git a/go/Makefile b/go/Makefile index ea067fa74935..545b6611c727 100644 --- a/go/Makefile +++ b/go/Makefile @@ -3,7 +3,7 @@ GO ?= go build_cmd = $(GO) build $(ARGS) .PHONY: all -all: $(PF_BINARIES) +all: $(PF_BINARIES) $(PF_GO_CMDS) .PHONY: go-env go-env: @@ -25,12 +25,19 @@ $(ALL_BINARIES_RACE): $(build_cmd) -C cmd/$(@:-race=) -o ../../$@ -race .PHONY: copy -copy: +copy: copy-sbin copy-bin + +.PHONY: copy-sbin +copy-sbin: cp -f $(PF_BINARIES) $(DESTDIR)$(SBINDIR) +.PHONY: copy-bin +copy-bin: + cp -f $(PF_GO_CMDS) $(DESTDIR)$(BINDIR) + .PHONY: clean clean: - rm -f $(PF_BINARIES) + rm -f $(PF_BINARIES) $(PF_GO_CMDS) .PHONY: clean-coredns-src clean-coredns-src: diff --git a/go/chisel/server/server_handler.go b/go/chisel/server/server_handler.go index 
dddbf3b0e2ea..696da830a8bc 100644 --- a/go/chisel/server/server_handler.go +++ b/go/chisel/server/server_handler.go @@ -478,7 +478,7 @@ func (s *Server) handleRemoteFingerbankCollectorEnv(w http.ResponseWriter, req * if sharedutils.IsEnabled(fingerbankSettings.Collector.NetworkBehaviorAnalysis) { env["COLLECTOR_ENDPOINT_ANALYSIS_WEBHOOK"] = "https://localhost:9090/fingerbank/nba/webhook" - env["COLLECTOR_ENDPOINT_ANALYSIS_WEBHOOK_PASSWORD"] = webservices.Pass + env["COLLECTOR_ENDPOINT_ANALYSIS_WEBHOOK_PASSWORD"] = webservices.Pass.String() env["COLLECTOR_ENDPOINT_ANALYSIS_WEBHOOK_USERNAME"] = webservices.User env["COLLECTOR_NETWORK_BEHAVIOR_ANALYSIS"] = "true" env["COLLECTOR_NETWORK_BEHAVIOR_POLICIES"] = "/usr/local/collector-remote/conf/network_behavior_policies.conf" diff --git a/go/cmd/pfcrypt/main.go b/go/cmd/pfcrypt/main.go new file mode 100644 index 000000000000..235ac8dbda5e --- /dev/null +++ b/go/cmd/pfcrypt/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "os" + + "github.com/inverse-inc/packetfence/go/config/pfcrypt" +) + +const usage = `Usage: + pfcrypt encrypt + pfcrypt decrypt +` + +func main() { + if len(os.Args) != 3 { + fmt.Fprintf(os.Stderr, "Not enough args\n%s\n", usage) + os.Exit(1) + return + } + + switch os.Args[1] { + default: + fmt.Fprintf(os.Stderr, "Invalid option\n%s\n", usage) + os.Exit(1) + return + case "encrypt": + ciphertext, err := pfcrypt.PfEncrypt([]byte(os.Args[2])) + if err != nil { + fmt.Printf("Error: %s\n", err.Error()) + os.Exit(1) + return + } + + fmt.Println(ciphertext) + case "decrypt": + text, err := pfcrypt.PfDecrypt(os.Args[2]) + if err != nil { + fmt.Printf("Error: %s\n", err.Error()) + os.Exit(1) + return + } + + fmt.Println(string(text)) + } +} diff --git a/go/cmd/pfdhcp/main.go b/go/cmd/pfdhcp/main.go index 3e1432f751a4..d447414ac7d4 100644 --- a/go/cmd/pfdhcp/main.go +++ b/go/cmd/pfdhcp/main.go @@ -214,7 +214,7 @@ func main() { 
router.HandleFunc("/api/v1/dhcp/options/network/{network:(?:[0-9]{1,3}.){3}(?:[0-9]{1,3})}", api.handleRemoveNetworkOptions).Methods("DELETE") router.HandleFunc("/api/v1/dhcp/options/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", api.handleOverrideOptions).Methods("POST") router.HandleFunc("/api/v1/dhcp/options/mac/{mac:(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}}", api.handleRemoveOptions).Methods("DELETE") - http.Handle("/", httpauth.SimpleBasicAuth(webservices.User, webservices.Pass)(router)) + http.Handle("/", httpauth.SimpleBasicAuth(webservices.User, webservices.Pass.String())(router)) srv := &http.Server{ Addr: ":22222", diff --git a/go/cmd/pfstats/main.go b/go/cmd/pfstats/main.go index 9d7c9d703d15..54659dab44f7 100644 --- a/go/cmd/pfstats/main.go +++ b/go/cmd/pfstats/main.go @@ -166,7 +166,7 @@ func (s ldaptype) Test(source interface{}, ctx context.Context) { } l.SetTimeout(time.Duration(timeout) * time.Second) - err = l.Bind(source.(pfconfigdriver.AuthenticationSourceLdap).BindDN, source.(pfconfigdriver.AuthenticationSourceLdap).Password) + err = l.Bind(source.(pfconfigdriver.AuthenticationSourceLdap).BindDN, source.(pfconfigdriver.AuthenticationSourceLdap).Password.String()) if err != nil { StatsdClient.Gauge("source."+source.(pfconfigdriver.AuthenticationSourceLdap).Type+"."+source.(pfconfigdriver.AuthenticationSourceLdap).PfconfigHashNS+strconv.Itoa(num), 0) } else { diff --git a/go/common/ldapSearchClient/LdapSearchClient.go b/go/common/ldapSearchClient/LdapSearchClient.go index 55c097df4a61..23a0a6497113 100644 --- a/go/common/ldapSearchClient/LdapSearchClient.go +++ b/go/common/ldapSearchClient/LdapSearchClient.go @@ -126,7 +126,7 @@ func (sc LdapSearchClient) connect() ldapClient.ILdapConnection { continue } - if err = conn.Bind(sc.LdapServer.BindDN, sc.LdapServer.Password); err != nil { + if err = conn.Bind(sc.LdapServer.BindDN, sc.LdapServer.Password.String()); err != nil { log.LogInfo(ctx, "Failed to authenticate to an LDAP server: "+err.Error()) 
conn.Close() continue diff --git a/go/config/pfcrypt/init.go b/go/config/pfcrypt/init.go new file mode 100644 index 000000000000..1c403feb0eed --- /dev/null +++ b/go/config/pfcrypt/init.go @@ -0,0 +1,35 @@ +package pfcrypt + +import ( + "fmt" + "os" + + "github.com/inverse-inc/packetfence/go/file_paths" +) + +var systemInitKey []byte +var dervivedKey []byte + +func setupSystemInitKey(envName, fileName string) error { + val := os.Getenv(envName) + if val != "" { + systemInitKey = []byte(val) + return nil + } + + var err error + systemInitKey, err = os.ReadFile(file_paths.SYSTEM_INIT_KEY_FILE) + if err != nil { + return fmt.Errorf("Cannot find key in env %s or file %s :%w", envName, fileName, err) + } + + return nil +} + +func init() { + if err := setupSystemInitKey("PF_SYSTEM_INIT_KEY", file_paths.SYSTEM_INIT_KEY_FILE); err != nil { + panic("Unable to setup the PF_SYSTEM_INIT secret" + err.Error()) + } + + dervivedKey = makeDerivedKey() +} diff --git a/go/config/pfcrypt/pfcrypt.go b/go/config/pfcrypt/pfcrypt.go new file mode 100644 index 000000000000..fab034fc5d04 --- /dev/null +++ b/go/config/pfcrypt/pfcrypt.go @@ -0,0 +1,159 @@ +package pfcrypt + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "io" + "strings" + + "golang.org/x/crypto/pbkdf2" +) + +const ( + ITERATION_COUNT = 5000 + LEN = 32 +) + +type part struct { + name string + data []byte +} + +const PREFIX = "PF_ENC[" + +func encodeParts(inputs ...part) string { + parts := make([]string, len(inputs)) + for i, t := range inputs { + parts[i] = t.name + ":" + base64.StdEncoding.EncodeToString(t.data) + } + + return strings.Join(parts, ",") +} + +func PfEncrypt(data []byte) (string, error) { + aesCypher, err := aes.NewCipher(dervivedKey) + ad := []byte{} + if err != nil { + return "", fmt.Errorf("PfEncrypt NewCipher: %w", err) + } + + gcm, err := cipher.NewGCM(aesCypher) + if err != nil { + return "", fmt.Errorf("PfEncrypt NewGCM: %w", err) + } + + iv := 
make([]byte, gcm.NonceSize()) + _, err = io.ReadFull(rand.Reader, iv) + if err != nil { + return "", fmt.Errorf("PfEncrypt nonce: %w", err) + } + + ciphertext := gcm.Seal(nil, iv, data, ad) + tagOffset := len(ciphertext) - 16 + tag := ciphertext[tagOffset:] + out := ciphertext[:tagOffset] + return PREFIX + + encodeParts( + part{name: "data", data: out}, + part{name: "iv", data: iv}, + part{name: "tag", data: tag}, + part{name: "ad", data: ad}, + ) + + "]", nil +} + +func decodeParts(input string) ([]part, error) { + after, found := strings.CutPrefix(input, PREFIX) + if !found { + return nil, fmt.Errorf("Invalid format Prefix not found") + } + + data, found := strings.CutSuffix(after, "]") + if !found { + return nil, fmt.Errorf("Invalid format Suffix not found") + } + + parts := make([]part, 0, 4) + + for _, s := range strings.Split(data, ",") { + s = strings.TrimSpace(s) + k, v, found := strings.Cut(s, ":") + if !found { + return nil, fmt.Errorf("Invalid format invalid part") + } + + d, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, fmt.Errorf("Cannot decode value: %w", err) + } + + parts = append(parts, part{name: k, data: d}) + } + + return parts, nil +} + +func getPart(parts []part, name string) (part, bool) { + for _, p := range parts { + if p.name == name { + return p, true + } + } + + return part{}, false +} + +func PfDecrypt(data string) ([]byte, error) { + parts, err := decodeParts(data) + if err != nil { + return nil, err + } + + tagPart, found := getPart(parts, "tag") + if !found { + return nil, fmt.Errorf("Tag Not Found") + } + + ivPart, found := getPart(parts, "iv") + if !found { + return nil, fmt.Errorf("IV Not Found") + } + + dataPart, found := getPart(parts, "data") + if !found { + return nil, fmt.Errorf("Data Not Found") + } + + adPart, found := getPart(parts, "ad") + if !found { + return nil, fmt.Errorf("Associated Data Not Found") + } + + aesCypher, err := aes.NewCipher(dervivedKey) + if err != nil { + return nil, 
fmt.Errorf("PfDerypt NewCipher: %w", err) + } + + gcm, err := cipher.NewGCM(aesCypher) + if err != nil { + return nil, fmt.Errorf("PfDerypt NewGCM: %w", err) + } + + ciphertext := make([]byte, len(tagPart.data)+len(dataPart.data)) + copy(ciphertext, dataPart.data) + copy(ciphertext[len(dataPart.data):], tagPart.data) + output, err := gcm.Open(nil, ivPart.data, ciphertext, adPart.data) + if err != nil { + return nil, fmt.Errorf("PfDerypt GCM.Open: %w", err) + } + + return output, nil +} + +func makeDerivedKey() []byte { + return pbkdf2.Key(systemInitKey, []byte("packetfence"), ITERATION_COUNT, LEN, sha256.New) +} diff --git a/go/config/pfcrypt/pfcrypt_test.go b/go/config/pfcrypt/pfcrypt_test.go new file mode 100644 index 000000000000..1df114211cbc --- /dev/null +++ b/go/config/pfcrypt/pfcrypt_test.go @@ -0,0 +1,60 @@ +package pfcrypt + +import ( + "bytes" + "os/exec" + "testing" +) + +func TestRoundTrip(t *testing.T) { + + input := []byte("Hello Test") + ciphertext, err := PfEncrypt(input) + if err != nil { + t.Fatalf("PfEncrypt: %s", err.Error()) + } + + output, err := PfDecrypt(ciphertext) + if err != nil { + t.Fatalf("PfDecrypt: %s", err.Error()) + } + + if bytes.Compare(input, output) != 0 { + t.Fatalf("Input does not match Output") + } + +} + +func TestPerl(t *testing.T) { + expected := []byte("Hello Test") + cmd := exec.Command("perl", "-I/usr/local/pf/lib", "-I/usr/local/pf/lib_perl/lib/perl5", "-Mpf::config::crypt", "-eprint pf::config::crypt::pf_encrypt('Hello Test')") + ciphertext, err := cmd.Output() + if err != nil { + t.Fatalf("perl crypt: %s", err.Error()) + } + + output, err := PfDecrypt(string(ciphertext)) + if err != nil { + t.Fatalf("PfDecrypt: %s", err.Error()) + } + + if bytes.Compare(expected, output) != 0 { + t.Fatalf("expected does not match Output") + } + + text, err := PfEncrypt([]byte(expected)) + if err != nil { + t.Fatalf("PfEncrypt: %s", err.Error()) + } + + cmd = exec.Command("perl", "-I/usr/local/pf/lib", 
"-I/usr/local/pf/lib_perl/lib/perl5", "-Mpf::config::crypt", "-eprint pf::config::crypt::pf_decrypt($ARGV[0])", text) + output, err = cmd.Output() + if err != nil { + t.Fatalf("perl crypt: %s", err.Error()) + } + + if bytes.Compare(expected, output) != 0 { + t.Fatalf("expected does not match Output") + } + +} diff --git a/go/config/pfcrypt/string.go b/go/config/pfcrypt/string.go new file mode 100644 index 000000000000..a03cbd80d8b1 --- /dev/null +++ b/go/config/pfcrypt/string.go @@ -0,0 +1,49 @@ +package pfcrypt + +import ( + "encoding/json" + "strings" +) + +type CryptString string + +func (c CryptString) AsEncrypted() (string, error) { + if strings.HasPrefix(string(c), PREFIX) { + return string(c), nil + } + + return PfEncrypt([]byte(c)) +} + +func (c CryptString) String() string { + return string(c) +} + +func (c CryptString) MarshalJSON() ([]byte, error) { + out, err := c.AsEncrypted() + if err != nil { + return nil, err + } + + return json.Marshal(out) +} + +func (c *CryptString) UnmarshalJSON(in []byte) error { + str := "" + if err := json.Unmarshal(in, &str); err != nil { + return err + } + + if !strings.HasPrefix(str, PREFIX) { + *c = CryptString(str) + return nil + } + + out, err := PfDecrypt(str) + if err != nil { + return err + } + + *c = CryptString(string(out)) + return nil +} diff --git a/go/config/pfcrypt/string_test.go b/go/config/pfcrypt/string_test.go new file mode 100644 index 000000000000..09a92235fc89 --- /dev/null +++ b/go/config/pfcrypt/string_test.go @@ -0,0 +1,44 @@ +package pfcrypt + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" +) + +type testJson struct { + F1 CryptString + F2 string +} + +func TestString(t *testing.T) { + data := testJson{"Value1", "Value2"} + out, err := json.Marshal(&data) + if err != nil { + t.Fatalf("%s", err.Error()) + } + + if !bytes.Contains(out, []byte(PREFIX)) { + t.Fatalf("%s does not contain encrypted data", string(out)) + } + + got := testJson{} + if err = 
json.Unmarshal(out, &got); err != nil { + t.Fatalf("%s", err.Error()) + } + + if diff := cmp.Diff(data, got); diff != "" { + t.Fatalf("Did not match %s", diff) + } + + if err = json.Unmarshal([]byte(`{"F1":"Value1","F2":"Value2"}`), &got); err != nil { + t.Fatalf("%s", err.Error()) + } + + if diff := cmp.Diff(data, got); diff != "" { + t.Fatalf("Did not match %s", diff) + } + +} diff --git a/go/db/db.go b/go/db/db.go index ed9c1bdd59da..4edf1f88b5e3 100644 --- a/go/db/db.go +++ b/go/db/db.go @@ -37,7 +37,7 @@ func ManualConnectDb(ctx context.Context, user, pass, host, port, dbName string) func DbLocalFromConfig(ctx context.Context) (*sql.DB, error) { dbConfig := pfconfigdriver.GetType[pfconfigdriver.PfConfDatabase](ctx) - return ManualConnectDb(ctx, dbConfig.User, dbConfig.Pass, "localhost", dbConfig.Port, dbConfig.Db) + return ManualConnectDb(ctx, dbConfig.User, dbConfig.Pass.String(), "localhost", dbConfig.Port, dbConfig.Db) } func ConnectDb(ctx context.Context, dbName string) (*sql.DB, error) { @@ -68,7 +68,7 @@ func ReturnURIFromConfig(ctx context.Context, dbName ...string) string { DBName = dbConfig.Db } - return ReturnURI(ctx, dbConfig.User, dbConfig.Pass, dbConfig.Host, dbConfig.Port, DBName) + return ReturnURI(ctx, dbConfig.User, dbConfig.Pass.String(), dbConfig.Host, dbConfig.Port, DBName) } func ReturnURI(ctx context.Context, user, pass, host, port, dbName string) string { diff --git a/go/file_paths/paths.go b/go/file_paths/paths.go index 8fa6b6ef81aa..ddfa5dc9c9dc 100644 --- a/go/file_paths/paths.go +++ b/go/file_paths/paths.go @@ -6,4 +6,6 @@ const PF_DIR = "/usr/local/pf" var VAR_DIR = filepath.Join(PF_DIR, "var") var RUN_DIR = filepath.Join(VAR_DIR, "run") +var CONF_DIR = filepath.Join(PF_DIR, "conf") var PFQUEUE_BACKEND_SOCKET = filepath.Join(RUN_DIR, "pfqueue-backend.sock") +var SYSTEM_INIT_KEY_FILE = filepath.Join(CONF_DIR, "system_init_key") diff --git a/go/firewallsso/barracudang.go b/go/firewallsso/barracudang.go index bc3dcb29c2dc..2981844acb68 
100644 --- a/go/firewallsso/barracudang.go +++ b/go/firewallsso/barracudang.go @@ -5,14 +5,15 @@ import ( "fmt" "github.com/inverse-inc/go-utils/log" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" "golang.org/x/crypto/ssh" ) type BarracudaNG struct { FirewallSSO - Username string `json:"username"` - Password string `json:"password"` - Port string `json:"port"` + Username string `json:"username"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } // Get an SSH session to the firewall @@ -20,7 +21,7 @@ func (fw *BarracudaNG) getSshSession(ctx context.Context) (*ssh.Session, error) sshConfig := &ssh.ClientConfig{ User: fw.Username, Auth: []ssh.AuthMethod{ - ssh.Password(fw.Password), + ssh.Password(fw.Password.String()), }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } diff --git a/go/firewallsso/checkpoint.go b/go/firewallsso/checkpoint.go index 16db3ed46d1a..93f37a7e1ead 100644 --- a/go/firewallsso/checkpoint.go +++ b/go/firewallsso/checkpoint.go @@ -9,12 +9,13 @@ import ( "github.com/inverse-inc/go-radius/rfc2865" "github.com/inverse-inc/go-radius/rfc2866" "github.com/inverse-inc/go-utils/log" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) type Checkpoint struct { FirewallSSO - Password string `json:"password"` - Port string `json:"port"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } // Send an SSO start to the Checkpoint firewall @@ -37,7 +38,7 @@ func (fw *Checkpoint) Start(ctx context.Context, info map[string]string, timeout // Build the RADIUS packet for an SSO start func (fw *Checkpoint) startRadiusPacket(ctx context.Context, info map[string]string, timeout int) *radius.Packet { - r := radius.New(radius.CodeAccountingRequest, []byte(fw.Password)) + r := radius.New(radius.CodeAccountingRequest, []byte(fw.Password.String())) rfc2866.AcctStatusType_Add(r, rfc2866.AcctStatusType_Value_Start) rfc2866.AcctSessionID_AddString(r, "acct_pf-"+info["mac"]) rfc2865.UserName_AddString(r, 
info["username"]) diff --git a/go/firewallsso/contentkeeper.go b/go/firewallsso/contentkeeper.go index ab2851c9e758..b15c81f149c1 100644 --- a/go/firewallsso/contentkeeper.go +++ b/go/firewallsso/contentkeeper.go @@ -13,12 +13,13 @@ import ( "github.com/inverse-inc/go-radius/rfc2869" "github.com/inverse-inc/go-utils/log" "github.com/inverse-inc/go-utils/sharedutils" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) type ContentKeeper struct { FirewallSSO - Password string `json:"password"` - Port string `json:"port"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } // Send an SSO start to the ContentKeeper firewall diff --git a/go/firewallsso/familyzone.go b/go/firewallsso/familyzone.go index 033285f0257c..5b44fe58618d 100644 --- a/go/firewallsso/familyzone.go +++ b/go/firewallsso/familyzone.go @@ -9,14 +9,15 @@ import ( "github.com/google/uuid" "github.com/inverse-inc/go-utils/log" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) // FamilyZone struct type FamilyZone struct { FirewallSSO - Username string `json:"username"` - Password string `json:"password"` - DeviceID string `json:"deviceid"` + Username string `json:"username"` + Password pfcrypt.CryptString `json:"password"` + DeviceID string `json:"deviceid"` } // Firewall specific init @@ -38,7 +39,7 @@ func (fw *FamilyZone) startHttp(ctx context.Context, info map[string]string, tim log.LoggerWContext(ctx).Error(err.Error()) return false, err } - s := fw.Password + "__" + info["username"] + "_PacketFence_" + id.String() + s := fw.Password.String() + "__" + info["username"] + "_PacketFence_" + id.String() h := sha1.New() h.Write([]byte(s)) sha1Hash := hex.EncodeToString(h.Sum(nil)) diff --git a/go/firewallsso/fortigate.go b/go/firewallsso/fortigate.go index 63d2ed50c7ef..d6fdc83c4470 100644 --- a/go/firewallsso/fortigate.go +++ b/go/firewallsso/fortigate.go @@ -9,12 +9,13 @@ import ( "github.com/inverse-inc/go-radius/rfc2865" 
"github.com/inverse-inc/go-radius/rfc2866" "github.com/inverse-inc/go-utils/log" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) type FortiGate struct { FirewallSSO - Password string `json:"password"` - Port string `json:"port"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } // Send an SSO start to the Fortigate firewall diff --git a/go/firewallsso/iboss.go b/go/firewallsso/iboss.go index 8c6c1a5b6a9e..ecbc1d6757a5 100644 --- a/go/firewallsso/iboss.go +++ b/go/firewallsso/iboss.go @@ -7,13 +7,14 @@ import ( "net/http" "github.com/inverse-inc/go-utils/log" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) type Iboss struct { FirewallSSO - NacName string `json:"nac_name"` - Password string `json:"password"` - Port string `json:"port"` + NacName string `json:"nac_name"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } // Send an SSO start to the Iboss firewall diff --git a/go/firewallsso/jsonrpc.go b/go/firewallsso/jsonrpc.go index 45027327f144..c53ed88d54c6 100644 --- a/go/firewallsso/jsonrpc.go +++ b/go/firewallsso/jsonrpc.go @@ -8,13 +8,14 @@ import ( "github.com/gorilla/rpc/v2/json2" "github.com/inverse-inc/go-utils/log" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) type JSONRPC struct { FirewallSSO - Username string `json:"username"` - Password string `json:"password"` - Port string `json:"port"` + Username string `json:"username"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } type JSONRPC_Args struct { @@ -54,7 +55,7 @@ func (fw *JSONRPC) makeRpcRequest(ctx context.Context, action string, info map[s return err } req.Header.Set("Content-Type", "application/json") - req.SetBasicAuth(fw.Username, fw.Password) + req.SetBasicAuth(fw.Username, fw.Password.String()) resp, err := fw.getHttpClient(ctx).Do(req) if err != nil { diff --git a/go/firewallsso/junipersrx.go b/go/firewallsso/junipersrx.go index bfdbe5f201b3..7f8f46bff073 100644 --- 
a/go/firewallsso/junipersrx.go +++ b/go/firewallsso/junipersrx.go @@ -9,13 +9,14 @@ import ( "time" "github.com/inverse-inc/go-utils/log" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) type JuniperSRX struct { FirewallSSO - Username string `json:"username"` - Password string `json:"password"` - Port string `json:"port"` + Username string `json:"username"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } // Send an SSO start to the JuniperSRX using HTTP @@ -32,7 +33,7 @@ func (fw *JuniperSRX) startHttp(ctx context.Context, info map[string]string, tim dst := fw.getDst(ctx, "tcp", fw.PfconfigHashNS, fw.Port) req, err := http.NewRequest("POST", "https://"+dst+"/api/userfw/v1/post-entry", bytes.NewBuffer([]byte(fw.startHttpPayload(ctx, info)))) - req.SetBasicAuth(fw.Username, fw.Password) + req.SetBasicAuth(fw.Username, fw.Password.String()) client := fw.getHttpClient(ctx) resp, err := client.Do(req) @@ -112,7 +113,7 @@ func (fw *JuniperSRX) Stop(ctx context.Context, info map[string]string) (bool, e func (fw *JuniperSRX) stopHttp(ctx context.Context, info map[string]string) (bool, error) { dst := fw.getDst(ctx, "tcp", fw.PfconfigHashNS, fw.Port) req, err := http.NewRequest("POST", "https://"+dst+"/api/userfw/v1/post-entry", bytes.NewBuffer([]byte(fw.stopHttpPayload(ctx, info)))) - req.SetBasicAuth(fw.Username, fw.Password) + req.SetBasicAuth(fw.Username, fw.Password.String()) client := fw.getHttpClient(ctx) resp, err := client.Do(req) diff --git a/go/firewallsso/watchguard.go b/go/firewallsso/watchguard.go index c886ff5515d2..7ad877c45ab9 100644 --- a/go/firewallsso/watchguard.go +++ b/go/firewallsso/watchguard.go @@ -10,12 +10,13 @@ import ( "github.com/inverse-inc/go-radius/rfc2866" "github.com/inverse-inc/go-utils/log" "github.com/inverse-inc/go-utils/sharedutils" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) type WatchGuard struct { FirewallSSO - Password string `json:"password"` - Port string `json:"port"` + 
Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` } // Send an SSO start to the WatchGuard firewall diff --git a/go/galeraautofix/mariadb/mariadb.go b/go/galeraautofix/mariadb/mariadb.go index 24a4d51a242d..0c638269b3eb 100644 --- a/go/galeraautofix/mariadb/mariadb.go +++ b/go/galeraautofix/mariadb/mariadb.go @@ -130,7 +130,7 @@ func GetLocalLiveSeqno(ctx context.Context) int { func GetLiveSeqno(ctx context.Context, host string) int { ctx = log.AddToLogContext(ctx, "function", "GetLiveSeqno") conf := DatabaseConfig(ctx) - db, err := db.ManualConnectDb(ctx, conf.User, conf.Pass, host, "3306", conf.Db) + db, err := db.ManualConnectDb(ctx, conf.User, conf.Pass.String(), host, "3306", conf.Db) if err != nil { log.LoggerWContext(ctx).Warn(fmt.Sprintf("Unable to connect to database on %s : %s", host, err.Error())) return DefaultSeqno @@ -158,7 +158,7 @@ func GetLiveSeqno(ctx context.Context, host string) int { func IsDBAvailable(ctx context.Context, host string) bool { ctx = log.AddToLogContext(ctx, "function", "IsDBAvailable") conf := DatabaseConfig(ctx) - db, err := db.ManualConnectDb(ctx, conf.User, conf.Pass, host, "3306", conf.Db) + db, err := db.ManualConnectDb(ctx, conf.User, conf.Pass.String(), host, "3306", conf.Db) if err != nil { log.LoggerWContext(ctx).Warn(fmt.Sprintf("Unable to connect to database on %s : %s", host, err.Error())) return false @@ -197,7 +197,7 @@ func IsDBReady(ctx context.Context, host string) bool { ready := IsDBAvailable(ctx, "localhost") if ready { conf := DatabaseConfig(ctx) - db, err := db.ManualConnectDb(ctx, conf.User, conf.Pass, host, "3306", conf.Db) + db, err := db.ManualConnectDb(ctx, conf.User, conf.Pass.String(), host, "3306", conf.Db) if err != nil { log.LoggerWContext(ctx).Warn(fmt.Sprintf("Unable to connect to database on %s : %s", host, err.Error())) return false diff --git a/go/jsonrpc2/client.go b/go/jsonrpc2/client.go index 61f18ccbebac..13ad78c89d6d 100644 --- a/go/jsonrpc2/client.go +++ 
b/go/jsonrpc2/client.go @@ -41,10 +41,10 @@ type Client struct { } type JsonRPC2Request struct { - Method string `json:"method"` - JsonRPC string `json:"jsonrpc"` - Params interface{} `json:"params"` - Id uint `json:"id,omitempty"` + Method string `json:"method"` + JsonRPC string `json:"jsonrpc"` + Params interface{} `json:"params"` + Id uint `json:"id,omitempty"` } type JsonRPC2Error struct { @@ -69,7 +69,7 @@ func NewClientFromConfig(ctx context.Context) *Client { pfconfigdriver.FetchDecodeSocket(ctx, &webservices) return &Client{ Username: webservices.User, - Password: webservices.Pass, + Password: webservices.Pass.String(), Proto: webservices.Proto, Host: webservices.Host, Port: webservices.Port, @@ -83,20 +83,20 @@ func NewAAAClientFromConfig(ctx context.Context) *Client { pfconfigdriver.FetchDecodeSocket(ctx, &ports) return &Client{ Username: webservices.User, - Password: webservices.Pass, + Password: webservices.Pass.String(), Proto: webservices.AAAProto, Host: webservices.Host, Port: ports.AAA, } } -func (c *Client) Call(ctx context.Context, method string, args interface{} ) (interface{}, error) { +func (c *Client) Call(ctx context.Context, method string, args interface{}) (interface{}, error) { c.Id++ request := JsonRPC2Request{ - Method: method, - JsonRPC: "2.0", - Params: args, - Id: c.Id, + Method: method, + JsonRPC: "2.0", + Params: args, + Id: c.Id, } r, err := c.buildRequest(&request) @@ -127,12 +127,12 @@ func (c *Client) Call(ctx context.Context, method string, args interface{} ) (in return response.Result, nil } -func (c *Client) Notify(ctx context.Context, method string, args interface{} ) error { +func (c *Client) Notify(ctx context.Context, method string, args interface{}) error { request := JsonRPC2Request{ - Method: method, - JsonRPC: "2.0", - Params: args, - Id: 0, + Method: method, + JsonRPC: "2.0", + Params: args, + Id: 0, } r, err := c.buildRequest(&request) diff --git a/go/pfconfigdriver/structs.go b/go/pfconfigdriver/structs.go index 
56e9e9329cd6..8dac045e54b2 100644 --- a/go/pfconfigdriver/structs.go +++ b/go/pfconfigdriver/structs.go @@ -7,6 +7,7 @@ import ( "time" "github.com/inverse-inc/go-utils/sharedutils" + "github.com/inverse-inc/packetfence/go/config/pfcrypt" ) // Interface for a pfconfig object. Not doing much now but it is there for future-proofing @@ -183,19 +184,19 @@ type PfConfServices struct { type PfConfWebservices struct { StructConfig - PfconfigMethod string `val:"hash_element"` - PfconfigNS string `val:"config::Pf"` - PfconfigHashNS string `val:"webservices"` - Pass string `json:"pass"` - Proto string `json:"proto"` - User string `json:"user"` - Port string `json:"port"` - AAAHost string `json:"aaa_host"` - AAAPort string `json:"aaa_port"` - AAAProto string `json:"aaa_proto"` - UnifiedAPIHost string `json:"unifiedapi_host"` - UnifiedAPIPort string `json:"unifiedapi_port"` - Host string `json:"host"` + PfconfigMethod string `val:"hash_element"` + PfconfigNS string `val:"config::Pf"` + PfconfigHashNS string `val:"webservices"` + Pass pfcrypt.CryptString `json:"pass"` + Proto string `json:"proto"` + User string `json:"user"` + Port string `json:"port"` + AAAHost string `json:"aaa_host"` + AAAPort string `json:"aaa_port"` + AAAProto string `json:"aaa_proto"` + UnifiedAPIHost string `json:"unifiedapi_host"` + UnifiedAPIPort string `json:"unifiedapi_port"` + Host string `json:"host"` } type UnifiedApiSystemUser struct { @@ -208,14 +209,14 @@ type UnifiedApiSystemUser struct { type PfConfDatabase struct { StructConfig - PfconfigMethod string `val:"hash_element"` - PfconfigNS string `val:"config::Pf"` - PfconfigHashNS string `val:"database"` - User string `json:"user"` - Pass string `json:"pass"` - Host string `json:"host"` - Port string `json:"port"` - Db string `json:"db"` + PfconfigMethod string `val:"hash_element"` + PfconfigNS string `val:"config::Pf"` + PfconfigHashNS string `val:"database"` + User string `json:"user"` + Pass pfcrypt.CryptString `json:"pass"` + Host string 
`json:"host"` + Port string `json:"port"` + Db string `json:"db"` } type ManagementNetwork struct { @@ -500,24 +501,24 @@ type AuthenticationSourceRadius struct { type AuthenticationSourceLdap struct { StructConfig - PfconfigMethod string `val:"hash_element"` - PfconfigNS string `val:"resource::authentication_sources_ldap"` - PfconfigHashNS string `val:"-"` - Description string `json:"description"` - Password string `json:"password"` - Port string `json:"port"` - Host []string `json:"host"` - ReadTimeout string `json:"read_timeout"` - WriteTimeout string `json:"write_timeout"` - BaseDN string `json:"basedn"` - Scope string `json:"scope"` - EmailAttribute string `json:"email_attribute"` - UserNameAttribute string `json:"usernameattribute"` - UseConnector bool `json:"use_connector"` - BindDN string `json:"binddn"` - Encryption string `json:"encryption"` - Monitor string `json:"monitor"` - Type string `json:"type"` + PfconfigMethod string `val:"hash_element"` + PfconfigNS string `val:"resource::authentication_sources_ldap"` + PfconfigHashNS string `val:"-"` + Description string `json:"description"` + Password pfcrypt.CryptString `json:"password"` + Port string `json:"port"` + Host []string `json:"host"` + ReadTimeout string `json:"read_timeout"` + WriteTimeout string `json:"write_timeout"` + BaseDN string `json:"basedn"` + Scope string `json:"scope"` + EmailAttribute string `json:"email_attribute"` + UserNameAttribute string `json:"usernameattribute"` + UseConnector bool `json:"use_connector"` + BindDN string `json:"binddn"` + Encryption string `json:"encryption"` + Monitor string `json:"monitor"` + Type string `json:"type"` } func (t *AuthenticationSourceLdap) UnmarshalJSON(data []byte) error { @@ -630,28 +631,28 @@ type PfConfParking struct { type PfConfAlerting struct { StructConfig - PfconfigMethod string `val:"hash_element"` - PfconfigNS string `val:"config::Pf"` - PfconfigHashNS string `val:"alerting"` - EmailAddr string `json:"emailaddr"` - FromAddr string 
`json:"fromaddr"` - SMTPPassword string `json:"smtp_password"` - SMTPEncryption string `json:"smtp_encryption"` - SubjectPrefic string `json:"subjectprefix"` - SMTPUsername string `json:"smtp_username"` - SMTPTimeout string `json:"smtp_timeout"` - SMTPPort int `json:"smtp_port"` - SMTPVerifySSL string `json:"smtp_verifyssl"` - SMTPServer string `json:"smtpserver"` + PfconfigMethod string `val:"hash_element"` + PfconfigNS string `val:"config::Pf"` + PfconfigHashNS string `val:"alerting"` + EmailAddr string `json:"emailaddr"` + FromAddr string `json:"fromaddr"` + SMTPPassword pfcrypt.CryptString `json:"smtp_password"` + SMTPEncryption string `json:"smtp_encryption"` + SubjectPrefic string `json:"subjectprefix"` + SMTPUsername string `json:"smtp_username"` + SMTPTimeout string `json:"smtp_timeout"` + SMTPPort int `json:"smtp_port"` + SMTPVerifySSL string `json:"smtp_verifyssl"` + SMTPServer string `json:"smtpserver"` } type PfConfActiveActive struct { StructConfig - PfconfigMethod string `val:"hash_element"` - PfconfigNS string `val:"config::Pf"` - PfconfigHashNS string `val:"active_active"` - GaleraReplicationUsername string `json:"galera_replication_username"` - GaleraReplicationPassword string `json:"galera_replication_password"` + PfconfigMethod string `val:"hash_element"` + PfconfigNS string `val:"config::Pf"` + PfconfigHashNS string `val:"active_active"` + GaleraReplicationUsername string `json:"galera_replication_username"` + GaleraReplicationPassword pfcrypt.CryptString `json:"galera_replication_password"` } type AllClusterServers struct { diff --git a/go/plugin/caddy2/api-aaa/api-aaa.go b/go/plugin/caddy2/api-aaa/api-aaa.go index f113bddee995..a867f8184c65 100644 --- a/go/plugin/caddy2/api-aaa/api-aaa.go +++ b/go/plugin/caddy2/api-aaa/api-aaa.go @@ -177,7 +177,7 @@ func (h *ApiAAAHandler) buildApiAAAHandler(ctx context.Context) error { h.authentication.AddAuthenticationBackend(h.webservicesBackend) if webservices.User != "" { - 
h.webservicesBackend.SetUser(webservices.User, webservices.Pass) + h.webservicesBackend.SetUser(webservices.User, webservices.Pass.String()) } // Backend for SSO @@ -366,7 +366,7 @@ func (h *ApiAAAHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next c webservices := pfconfigdriver.GetStruct(ctx, "PfConfWebservices").(*pfconfigdriver.PfConfWebservices) // Reload the webservices user info if webservices.User != "" { - h.webservicesBackend.SetUser(webservices.User, webservices.Pass) + h.webservicesBackend.SetUser(webservices.User, webservices.Pass.String()) } defer panichandler.Http(ctx, w) diff --git a/go/plugin/caddy2/api-aaa/api-aaa_test.go b/go/plugin/caddy2/api-aaa/api-aaa_test.go index 94df9f2fb750..ecbb7b305973 100644 --- a/go/plugin/caddy2/api-aaa/api-aaa_test.go +++ b/go/plugin/caddy2/api-aaa/api-aaa_test.go @@ -59,7 +59,7 @@ func TestApiAAALogin(t *testing.T) { func TestApiAAATokenInfo(t *testing.T) { webservices := pfconfigdriver.GetStruct(ctx, "PfConfWebservices").(*pfconfigdriver.PfConfWebservices) - _, token, _ := apiAAA.authentication.Login(ctx, webservices.User, webservices.Pass) + _, token, _ := apiAAA.authentication.Login(ctx, webservices.User, webservices.Pass.String()) tokenInfo, _ := apiAAA.authorization.GetTokenInfo(ctx, token) req, _ := http.NewRequest("GET", "/api/v1/token_info", nil) diff --git a/go/plugin/caddy2/pfpki/models/models.go b/go/plugin/caddy2/pfpki/models/models.go index c1048cb75995..decb61ba6e8b 100644 --- a/go/plugin/caddy2/pfpki/models/models.go +++ b/go/plugin/caddy2/pfpki/models/models.go @@ -2095,7 +2095,7 @@ func email(ctx context.Context, email EmailType) (types.Info, error) { return err })) } - d := gomail.NewDialer(alerting.SMTPServer, alerting.SMTPPort, alerting.SMTPUsername, alerting.SMTPPassword) + d := gomail.NewDialer(alerting.SMTPServer, alerting.SMTPPort, alerting.SMTPUsername, alerting.SMTPPassword.String()) if alerting.SMTPVerifySSL == "disabled" || alerting.SMTPEncryption == "none" { d.TLSConfig = 
&tls.Config{InsecureSkipVerify: true} diff --git a/html/pfappserver/lib/pfappserver/Form/Field/ObfuscatedText.pm b/html/pfappserver/lib/pfappserver/Form/Field/ObfuscatedText.pm index 503abfd40ff2..8520d067face 100644 --- a/html/pfappserver/lib/pfappserver/Form/Field/ObfuscatedText.pm +++ b/html/pfappserver/lib/pfappserver/Form/Field/ObfuscatedText.pm @@ -15,8 +15,11 @@ extends 'HTML::FormHandler::Field::Text'; use pf::util; use namespace::autoclean; +use pf::config::crypt; has '+type_attr' => ( default => 'password' ); +has '+inflate_default_method'=> ( default => sub { \&inflate } ); +has '+deflate_value_method'=> ( default => sub { \&deflate } ); sub BUILD { my ($self, @args) = @_; @@ -42,6 +45,19 @@ sub element_attributes { return $attr; } + +sub deflate { + my ($self, $value ) = @_; + $value = pf::config::crypt::pf_encrypt($value); + return $value; +} + +sub inflate { + my ($self, $value ) = @_; + $value = pf::config::crypt::pf_decrypt($value); + return $value; +} + =head1 COPYRIGHT Copyright (C) 2005-2024 Inverse inc. diff --git a/lib/pf/ConfigStore/All.pm b/lib/pf/ConfigStore/All.pm new file mode 100644 index 000000000000..c2cc82f679bf --- /dev/null +++ b/lib/pf/ConfigStore/All.pm @@ -0,0 +1,62 @@ +package pf::ConfigStore::All; + +=head1 NAME + +pf::ConfigStore::All - + +=head1 DESCRIPTION + +pf::ConfigStore::All + +=cut + +use strict; +use warnings; +use Role::Tiny qw(); + +use Module::Pluggable + 'search_path' => [qw(pf::ConfigStore)], + 'sub_name' => '_all_stores', + 'require' => 1, + 'inner' => 0, + ; + +our @STORES; + +sub all_stores { + if (!@STORES) { + my @tmp_stores = __PACKAGE__->_all_stores(); + @STORES = grep { $_ ne __PACKAGE__ && !Role::Tiny->is_role($_) && !$_->does('pf::ConfigStore::Group') && !$_->does('pf::ConfigStore::Filtered') } @tmp_stores; + } + + return [@STORES]; +} + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. 
+ +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut + +1; diff --git a/lib/pf/Sereal.pm b/lib/pf/Sereal.pm index 6e2801735116..b4c0def269d4 100644 --- a/lib/pf/Sereal.pm +++ b/lib/pf/Sereal.pm @@ -18,9 +18,10 @@ use Sereal::Encoder; use Sereal::Decoder; use base qw(Exporter); -our @EXPORT_OK = qw($ENCODER $DECODER); +our @EXPORT_OK = qw($ENCODER $DECODER $ENCODER_FREEZER); -our $ENCODER = Sereal::Encoder->new; +our $ENCODER = Sereal::Encoder->new(); +our $ENCODER_FREEZER = Sereal::Encoder->new({ freeze_callbacks => 1}); our $DECODER = Sereal::Decoder->new; =head2 CLONE @@ -32,6 +33,7 @@ Reinitialize ENCODER/DECODER when a new thread is created sub CLONE { $ENCODER = Sereal::Encoder->new; $DECODER = Sereal::Decoder->new; + $ENCODER_FREEZER = Sereal::Encoder->new({ freeze_callbacks => 1}); } =head1 AUTHOR diff --git a/lib/pf/cluster.pm b/lib/pf/cluster.pm index 804779cbb82d..2aa100a8be10 100644 --- a/lib/pf/cluster.pm +++ b/lib/pf/cluster.pm @@ -38,14 +38,7 @@ use POSIX qw(ceil); use Crypt::CBC; use pf::config::cluster; use Role::Tiny qw(); - - -use Module::Pluggable - 'search_path' => [qw(pf::ConfigStore)], - 'sub_name' => '_all_stores', - 'require' => 1, - 'inner' => 0, - ; +use pf::ConfigStore::All; use Exporter; @@ -399,13 +392,12 @@ sub sync_storages { my ($stores, %options) = @_; require 
pf::api::jsonrpcclient; my $apiclient = pf::api::jsonrpcclient->new(); - foreach my $store (@$stores){ + foreach my $store (@$stores) { eval { get_logger->info("Synching storage : $store"); my $cs = $store->new; my $pfconfig_namespace = $cs->pfconfigNamespace; - - if($pfconfig_namespace) { + if ($pfconfig_namespace) { my $config_file = $cs->configFile; my %data = ( namespace => $pfconfig_namespace, @@ -649,7 +641,7 @@ Returns the list of ConfigStore to synchronize between cluster members =cut sub stores_to_sync { - my @tmp_stores = __PACKAGE__->_all_stores(); + my @tmp_stores = pf::ConfigStore::All->all_stores(); my @stores = grep { !Role::Tiny->is_role($_) && !$_->does('pf::ConfigStore::Group') && !$_->does('pf::ConfigStore::Filtered') } @tmp_stores; return \@stores; diff --git a/lib/pf/config/crypt.pm b/lib/pf/config/crypt.pm new file mode 100644 index 000000000000..fa224a47a05a --- /dev/null +++ b/lib/pf/config/crypt.pm @@ -0,0 +1,142 @@ +package pf::config::crypt; + +=head1 NAME + +pf::config::crypt - + +=head1 DESCRIPTION + +pf::config::crypt + +=cut + +use strict; +use warnings; +use Crypt::KeyDerivation qw(pbkdf2); +use Crypt::Mode::CBC; +use Crypt::PRNG qw(random_bytes); +use Crypt::AuthEnc::GCM qw(gcm_encrypt_authenticate gcm_decrypt_verify); +use MIME::Base64; +use pf::file_paths qw($system_init_key_file); + +our $PREFIX = 'PF_ENC['; +our $ITERATION_COUNT = 5000; +our $HASH_TYPE = 'SHA256'; +our $LEN = 32; +our $SYSTEM_INIT_KEY = ''; +our $DERIVED_KEY; + +BEGIN { + $ITERATION_COUNT = 5000; + $HASH_TYPE = 'SHA256'; + $LEN = 32; + my $val = $ENV{PF_SYSTEM_INIT_KEY}; + if ($val) { + $SYSTEM_INIT_KEY = $val; + } else { + open(my $fh, "<", $system_init_key_file) or die "open($system_init_key_file): $!"; + local $/ = undef; + $SYSTEM_INIT_KEY = <$fh>; + close($fh); + } +} + +sub derived_key { + my ($init) = @_; + return pbkdf2($init, 'packetfence', $ITERATION_COUNT, $HASH_TYPE, $LEN); +} + +BEGIN { + if ($SYSTEM_INIT_KEY eq '') { + die "system init key"; + } + 
+ $DERIVED_KEY = derived_key($SYSTEM_INIT_KEY); +} + +sub encode_tags { + if (@_ % 2) { + die "odd number of passed"; + } + my @parts; + while (@_) { + my ($id, $data) = (shift,shift); + push @parts, "$id:". encode_base64($data, ''); + } + + return join(",", @parts); +} + +sub decode_tags { + my ($data) = @_; + $data =~ /^PF_ENC\[(.*)\]/; + my $tags = $1; + my %parts; + for my $part (split /\s*,\s*/, $tags) { + my ($k, $v) = split ':', $part, 2; + $parts{$k} = decode_base64($v); + } + return \%parts; +} + +sub pf_encrypt { + my ($text) = @_; + return pf_encrypt_with_key($DERIVED_KEY, $text); +} + +sub pf_decrypt { + my ($data) = @_; + return pf_decrypt_with_key($DERIVED_KEY, $data); +} + +sub pf_encrypt_with_key { + my ($key, $text) = @_; + if (rindex($text, $PREFIX, 0) == 0) { + return $text; + } + + my $iv = random_bytes(12); + my $ad = ''; + my ($ciphertext, $tag) = gcm_encrypt_authenticate('AES', $key, $iv, $ad, $text); + return $PREFIX . encode_tags(data => $ciphertext, tag => $tag, iv => $iv, ad => $ad) . ']'; +} + +sub pf_decrypt_with_key { + my ($key, $data) = @_; + if (rindex($data, $PREFIX, 0) != 0) { + return $data; + } + + my $tags = decode_tags($data); + return gcm_decrypt_verify('AES', $key, $tags->{iv}, $tags->{ad}, $tags->{data}, $tags->{tag}); +} + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut + +1; + diff --git a/lib/pf/config/crypt/object.pm b/lib/pf/config/crypt/object.pm new file mode 100644 index 000000000000..9ec317813923 --- /dev/null +++ b/lib/pf/config/crypt/object.pm @@ -0,0 +1,77 @@ +package pf::config::crypt::object; + +=head1 NAME + +pf::config::crypt::object - + +=head1 DESCRIPTION + +pf::config::crypt::object + +=cut + +use strict; +use warnings; +use pf::config::crypt; +use pf::config::crypt::string; + +sub new { + my ($proto, $data) = @_; + my $class = ref($proto) || $proto; + return bless(\$data, $class) +} + +sub THAW { + my ($class, $serializer, $data) = @_; + if (rindex($data, $pf::config::crypt::PREFIX, 0) == 0) { + $data = pf::config::crypt::pf_decrypt($data); + } + + return pf::config::crypt::string->new($data); +} + +sub TO_JSON { + ${$_[0]} +} + +use overload + '""' => \&stringify, + fallback => 1; + + +sub stringify { + if (rindex(${$_[0]}, $pf::config::crypt::PREFIX, 0) == 0) { + ${$_[0]} = pf::config::crypt::pf_decrypt(${$_[0]}); + } + + ${$_[0]} +} + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. + +=cut + +1; diff --git a/lib/pf/config/crypt/object/freeze.pm b/lib/pf/config/crypt/object/freeze.pm new file mode 100644 index 000000000000..e19fc5091fb5 --- /dev/null +++ b/lib/pf/config/crypt/object/freeze.pm @@ -0,0 +1,55 @@ +package pf::config::crypt::object::freeze; + +=head1 NAME + +pf::config::crypt::object::freeze - + +=head1 DESCRIPTION + +pf::config::crypt::object::freeze + +=cut + +use strict; +use warnings; +use pf::config::crypt; + +sub pf::config::crypt::object::FREEZE { + my ($self, $serializer) = @_; + my $data = $$self; + if (rindex($data, $pf::config::crypt::PREFIX, 0) == 0) { + return $data; + } + + return pf::config::crypt::pf_encrypt($data) +} + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. 
+ +=cut + +1; + diff --git a/lib/pf/config/crypt/string.pm b/lib/pf/config/crypt/string.pm new file mode 100644 index 000000000000..ef101ded8278 --- /dev/null +++ b/lib/pf/config/crypt/string.pm @@ -0,0 +1,59 @@ +package pf::config::crypt::string; + +=head1 NAME + +pf::config::crypt::string - + +=head1 DESCRIPTION + +pf::config::crypt::string + +=cut + +use strict; +use warnings; + +sub new { + my ($proto, $data) = @_; + my $class = ref($proto) || $proto; + return bless(\$data, $class) +} + +use overload + '""' => \&stringify, + fallback => 1; + + +sub stringify { + ${$_[0]} +} + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. 
+ +=cut + +1; + diff --git a/lib/pf/constants/cluster.pm b/lib/pf/constants/cluster.pm index d454270975c5..875d78f8eeb7 100644 --- a/lib/pf/constants/cluster.pm +++ b/lib/pf/constants/cluster.pm @@ -33,6 +33,7 @@ use pf::file_paths qw( $ip6table_input_management_config_file $local_secret_file $unified_api_system_pass_file + $system_init_key_file $network_behavior_policy_config_file ); @@ -47,6 +48,7 @@ our @FILES_TO_SYNC = ( $radius_ca_cert, $local_secret_file, $unified_api_system_pass_file, + $system_init_key_file, $network_behavior_policy_config_file, $pfconfig::constants::CONFIG_FILE_PATH, $iptable_config_file, diff --git a/lib/pf/file_paths.pm b/lib/pf/file_paths.pm index 4e81f8d6bb85..206afc7d0e08 100644 --- a/lib/pf/file_paths.pm +++ b/lib/pf/file_paths.pm @@ -47,6 +47,7 @@ our ( $local_secret_file, # Unified API system user password $unified_api_system_pass_file, + $system_init_key_file, #profiles.conf variables $profiles_config_file, $profiles_default_config_file, #Other configuraton files variables @@ -157,6 +158,7 @@ BEGIN { $oui_file $oui_url $local_secret_file $unified_api_system_pass_file + $system_init_key_file $profiles_config_file $profiles_default_config_file $switches_config_file $switches_default_config_file $template_switches_config_file $template_switches_default_config_file @@ -285,6 +287,7 @@ $suricata_categories_file = catfile($conf_dir, "suricata_categories.txt"); $nexpose_categories_file = catfile($conf_dir, "nexpose-responses.txt"); $local_secret_file = catfile($conf_dir, "local_secret"); $unified_api_system_pass_file = catfile($conf_dir, "unified_api_system_pass"); +$system_init_key_file = catfile($conf_dir, "system_init_key"); $pf_doc_file = catfile($conf_dir, "documentation.conf"); $oauth_ip_file = catfile($conf_dir, "oauth2-ips.conf"); $ui_config_file = catfile($conf_dir, "ui.conf"); diff --git a/lib/pf/util.pm b/lib/pf/util.pm index a793af314b3c..e049124c3ea2 100644 --- a/lib/pf/util.pm +++ b/lib/pf/util.pm @@ -124,6 +124,7 @@ BEGIN 
{ chown_pf norm_net_mask safe_pf_run + starts_with ); } @@ -1886,10 +1887,6 @@ sub extract { return $default; } -sub ends_with { - return $_[1] eq substr($_[0], -length($_[1])); -} - sub split_pem { my ($s) = @_; my @parts; @@ -1976,6 +1973,14 @@ sub norm_net_mask { return $mask_norm_dotted; } +sub starts_with { + return rindex($_[0], $_[1], 0) == 0; +} + +sub ends_with { + return index($_[0], $_[1], -length($_[1])) != -1; +} + =back =head1 AUTHOR diff --git a/lib/pfconfig/backend/mysql.pm b/lib/pfconfig/backend/mysql.pm index 1a18257cab48..b6c946e96b75 100644 --- a/lib/pfconfig/backend/mysql.pm +++ b/lib/pfconfig/backend/mysql.pm @@ -20,7 +20,8 @@ use DBI; use pfconfig::config; use Try::Tiny; use pf::log; -use pf::Sereal qw($DECODER $ENCODER); +use pf::Sereal qw($DECODER $ENCODER_FREEZER); +use pf::config::crypt::object; use base 'pfconfig::backend'; @@ -230,7 +231,7 @@ sub set { $self->_db_error(); return 0; } - $value = sereal_encode_with_object($ENCODER, $value); + $value = sereal_encode_with_object($ENCODER_FREEZER, $value); my $result; eval { $result = $db->do( "REPLACE INTO $self->{_table} (id, value) VALUES(?,?)", undef, $key, $value ); diff --git a/lib/pfconfig/cached.pm b/lib/pfconfig/cached.pm index 2c99a1d9fb27..23c43b31144b 100644 --- a/lib/pfconfig/cached.pm +++ b/lib/pfconfig/cached.pm @@ -39,6 +39,7 @@ use Sereal::Decoder qw(sereal_decode_with_object); use Time::HiRes qw(stat time); use pf::Sereal qw($DECODER); use pfconfig::config; +use pf::config::crypt::object; use bytes; our $LAST_TOUCH_CACHE = 0; diff --git a/lib/pfconfig/manager.pm b/lib/pfconfig/manager.pm index a486e1e04f34..534921178f10 100644 --- a/lib/pfconfig/manager.pm +++ b/lib/pfconfig/manager.pm @@ -48,6 +48,9 @@ use Tie::IxHash; use pfconfig::config; use pf::constants::user; use pfconfig::git_storage; +use pf::config::crypt; +use pf::config::crypt::object; +use Scalar::Util qw(reftype); my $ordered_prefix = "ORDERED::"; @@ -63,7 +66,37 @@ sub config_builder { my $logger = 
get_logger; my $elem = $self->get_namespace($namespace); my $tmp = $elem->build(); - return $tmp; + return filter_data($tmp); +} + +sub filter_data { + my ($value) = @_; + return $value if !defined $value; + my $ref_type = reftype($value); + if (!defined ($ref_type)) { + if (rindex($value, $pf::config::crypt::PREFIX, 0) == 0) { + return pf::config::crypt::object->new($value); + } + return $value; + } + + if ($ref_type eq 'ARRAY') { + for (my $i =0;$i<@$value;$i++) { + $value->[$i] = filter_data($value->[$i]); + } + + return $value; + } + + if ($ref_type eq 'HASH') { + while (my ($k, $v) = each %$value) { + $value->{$k} = filter_data($v); + } + + return $value; + } + + return $value; } =head2 get_namespace @@ -231,9 +264,7 @@ sub touch_cache { my $filename = pfconfig::util::control_file_path($what); $filename = untaint_chain($filename); touch_file($filename); - $self->{last_touch_cache} = time; - $pfconfig::cached::LAST_TOUCH_CACHE = time; - $pfconfig::cached::RELOADED_TOUCH_CACHE = time; + $self->{last_touch_cache} = $pfconfig::cached::LAST_TOUCH_CACHE = $pfconfig::cached::RELOADED_TOUCH_CACHE = time; } =head2 get_cache @@ -339,11 +370,12 @@ sub cache_resource { # inflates the element if necessary $result = $self->post_process_element($what, $result); my $cache_w = $self->{cache}->set( $what, $result, 864000 ); - $logger->trace("Cache write gave : $cache_w"); unless ($cache_w) { my $message = "Could not write namespace $what to L2 cache !"; print STDERR $message . 
"\n"; $logger->error($message); + } else { + $logger->trace("Cache write gave : $cache_w"); } if($self->{pfconfig_server}) { $self->touch_cache($what); diff --git a/lib/pfconfig/namespaces/config/Authentication.pm b/lib/pfconfig/namespaces/config/Authentication.pm index f83b68127a2a..805852725f1a 100644 --- a/lib/pfconfig/namespaces/config/Authentication.pm +++ b/lib/pfconfig/namespaces/config/Authentication.pm @@ -23,6 +23,7 @@ use pfconfig::namespaces::config; use pf::file_paths qw($authentication_config_file); use pf::util qw(isdisabled); use pf::constants::authentication; +use pf::config::crypt; use pf::Authentication::constants; use pf::Authentication::Action; use pf::Authentication::Condition; @@ -31,6 +32,7 @@ use pf::Authentication::utils; use Sort::Naturally qw(nsort); use List::MoreUtils qw(uniq); use pf::constants::authentication; +use pf::config::crypt::object; use base 'pfconfig::namespaces::config'; diff --git a/rpm/packetfence.spec b/rpm/packetfence.spec index 70fae6c2b01a..96269f9eaddc 100644 --- a/rpm/packetfence.spec +++ b/rpm/packetfence.spec @@ -57,6 +57,7 @@ Requires: freeradius >= 3.2.6, freeradius-mysql >= 3.2.6, freeradius-perl >= 3.2 Requires: make Requires: net-tools Requires: sscep +Requires: util-linux Requires: net-snmp >= 5.3.2.2 Requires: net-snmp-perl Requires: perl >= %{perl_version} @@ -695,6 +696,11 @@ if [ ! -f /usr/local/pf/conf/unified_api_system_pass ]; then date +%s | sha256sum | base64 | head -c 32 > /usr/local/pf/conf/unified_api_system_pass fi +# Create server API system user password +if [ ! 
-f /usr/local/pf/conf/system_init_key ]; then + hexdump -e '/1 "%x"' < /dev/urandom | head -c 32 > /usr/local/pf/conf/system_init_key +fi + for service in httpd snmptrapd portreserve redis netdata do if /bin/systemctl -a | grep $service > /dev/null 2>&1; then @@ -920,6 +926,7 @@ fi %attr(0755, pf, pf) /usr/local/pf/addons/watchdog/*.sh %dir /usr/local/pf/bin %attr(6755, root, root) /usr/local/pf/bin/pfcmd +%attr(6755, root, root) /usr/local/pf/bin/pfcrypt %attr(0755, root, root) /usr/local/pf/bin/ntlm_auth_wrapper %attr(0755, pf, pf) /usr/local/pf/bin/pfcmd.pl %attr(0755, pf, pf) /usr/local/pf/bin/pfcmd_vlan diff --git a/sbin/pfconfig b/sbin/pfconfig index 95aa5e574eac..379a11d61afc 100755 --- a/sbin/pfconfig +++ b/sbin/pfconfig @@ -47,7 +47,7 @@ use pf::services::util; use pf::file_paths qw($var_dir); use pfconfig::constants; use Sereal::Encoder qw(sereal_encode_with_object); -use pf::Sereal qw($ENCODER); +use pf::Sereal qw($ENCODER_FREEZER); use Errno qw(EINTR EAGAIN); use bytes; use pf::util::networking qw(send_data_with_length); @@ -55,6 +55,8 @@ use Linux::Systemd::Daemon 'sd_ready'; use POSIX 'WNOHANG'; use pfconfig::git_storage; use Tie::IxHash; +use pf::config::crypt::object; +use pf::config::crypt::object::freeze; our $RUNNING = 1; @@ -430,7 +432,7 @@ sub encode_output { my ($data) = @_; $encoding //= "sereal"; if($encoding eq "sereal") { - return sereal_encode_with_object($ENCODER, $data); + return sereal_encode_with_object($ENCODER_FREEZER, $data); } elsif($encoding eq "json") { my $json = JSON->new; diff --git a/t/data/authentication.conf b/t/data/authentication.conf index 94ef9ddb336c..a9b0fd9e80b1 100644 --- a/t/data/authentication.conf +++ b/t/data/authentication.conf @@ -405,6 +405,20 @@ type=AD host=127.0.0.1 cache_match=0 +[LDAPWITHENCRYPTEDPASSWORD] +description=pf-test +password=PF_ENC[data:ACooRZUr,iv:D1J3OOW8Cu9itu+m,tag:l5hcRjSwwWswgJ+4J1sOZQ==,ad:] +scope=sub +binddn=CN=test,DC=inverse,DC=ca +basedn=DC=ldap,DC=inverse,DC=ca 
+usernameattribute=user +connection_timeout=5 +encryption=none +port=33389 +type=AD +host=127.0.0.1 +cache_match=1 + [htpasswd-stripped] realms= set_access_level_action= diff --git a/t/data/system_init_key b/t/data/system_init_key new file mode 100644 index 000000000000..d95438c87653 --- /dev/null +++ b/t/data/system_init_key @@ -0,0 +1 @@ +bc3e2ae9201775dba24d908514fdc663 \ No newline at end of file diff --git a/t/test_paths.pm b/t/test_paths.pm index a921dfe215ad..fda08b1d7ab4 100644 --- a/t/test_paths.pm +++ b/t/test_paths.pm @@ -66,6 +66,7 @@ BEGIN { $pf::file_paths::radius_server_cert = catfile($test_dir,'data/radius_server.crt'); $pf::file_paths::radius_server_key = catfile($test_dir,'data/radius_server.key'); $pf::file_paths::radius_ca_cert = catfile($test_dir,'data/radius_ca.pem'); + $pf::file_paths::system_init_key_file = catfile($test_dir, 'data/system_init_key'); $pfconfig::constants::CONFIG_FILE_PATH = catfile($test_paths::test_dir, 'data/pfconfig.conf'); $pfconfig::constants::SOCKET_PATH = "/usr/local/pf/var/run/pfconfig-test.sock"; diff --git a/t/unittest/config/crypt/object/freeze.t b/t/unittest/config/crypt/object/freeze.t new file mode 100755 index 000000000000..ceba0455f592 --- /dev/null +++ b/t/unittest/config/crypt/object/freeze.t @@ -0,0 +1,76 @@ +#!/usr/bin/perl + +=head1 NAME + +freeze + +=head1 DESCRIPTION + +unit test for object + +=cut + +use strict; +use warnings; + +BEGIN { + #include test libs + use lib qw(/usr/local/pf/t); + #Module for overriding configuration paths + use setup_test_config; +} + +use Test::More tests => 4; +use pf::config::crypt::object; +use pf::config::crypt::object::freeze; +use pf::Sereal qw($DECODER $ENCODER_FREEZER); +use Sereal::Encoder qw(sereal_encode_with_object); +use Sereal::Decoder qw(sereal_decode_with_object); + +#This test will running last +use Test::NoWarnings; +our %authentication_lookup; +tie %authentication_lookup, 'pfconfig::cached_hash', 'resource::authentication_lookup'; + +my $secret = 
'secret'; +my $object = pf::config::crypt::object->new($secret); +my $frozen = $object->FREEZE(undef); +my $thawed = $object->THAW(undef, $frozen); +is($secret, $thawed, "Data frozen and thawed"); + +my $data = sereal_encode_with_object($ENCODER_FREEZER, $object); +$thawed = sereal_decode_with_object($DECODER, $data); +is($secret, $thawed, "Data frozen and thawed"); + +use Data::Dumper; print Dumper($authentication_lookup{LDAPWITHENCRYPTEDPASSWORD}); +is($secret, $authentication_lookup{LDAPWITHENCRYPTEDPASSWORD}{password}, "Data frozen and thawed from pfconfig"); + +=head1 AUTHOR + +Inverse inc. + +=head1 COPYRIGHT + +Copyright (C) 2005-2024 Inverse inc. + +=head1 LICENSE + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +USA. 
+ +=cut + +1; + diff --git a/t/util.t b/t/util.t index 8c67e333aaa7..03c1db38fe75 100755 --- a/t/util.t +++ b/t/util.t @@ -316,7 +316,7 @@ use Test::More; use Test::NoWarnings; BEGIN { - plan tests => 45 + + plan tests => 55 + ((scalar @NODE_ID_TESTS) * 3) + scalar @STRIP_FILENAME_FROM_EXCEPTIONS_TESTS + scalar @INVALID_DATES + @@ -503,6 +503,19 @@ for my $test (@MAC2DEC) { is(extract('[inverse-test@staff.it.acme.edu](mailto:inverse-test@staff.it.acme.edu)','@(\w+)','$1.VLAN'), "staff.VLAN") } +{ + ok(starts_with("dog", "do"), "starts_with"); + ok(starts_with("dog", "dog"), "starts_with"); + ok(starts_with("dog", ""), "starts_with"); + ok(!starts_with("cat", "do"), "starts_with"); + ok(!starts_with("cat", "catt"), "starts_with"); + ok(ends_with("dog", "og"), "ends_with"); + ok(ends_with("dog", "dog"), "ends_with"); + ok(ends_with("dog", ""), "ends_with"); + ok(!ends_with("cat", "og"), "ends_with"); + ok(!ends_with("cat", "catt"), "ends_with"); +} + =head1 AUTHOR Inverse inc. From ecc1b214d5059d3055ec5e450b7a99d8d1032586 Mon Sep 17 00:00:00 2001 From: Darren+Satkunas Date: Thu, 5 Dec 2024 21:54:20 +0000 Subject: [PATCH 176/176] update NEWS --- NEWS.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.asciidoc b/NEWS.asciidoc index e769320b0a00..8f35c4901d65 100644 --- a/NEWS.asciidoc +++ b/NEWS.asciidoc @@ -30,6 +30,7 @@ For a list of compatibility related changes see the <