Thursday, March 12, 2020

[389-commits] [389-ds-base] branch 389-ds-base-1.4.2 updated: Issue 50937 - Update CLI for new backend split configuration

This is an automated email from the git hooks/post-receive script.

mreynolds pushed a commit to branch 389-ds-base-1.4.2
in repository 389-ds-base.

The following commit(s) were added to refs/heads/389-ds-base-1.4.2 by this push:
new 005fbc7 Issue 50937 - Update CLI for new backend split configuration
005fbc7 is described below

commit 005fbc72158d358b618c798b82fea9a948544ee1
Author: Mark Reynolds <mreynolds@redhat.com>
AuthorDate: Mon Mar 9 10:04:22 2020 -0400

Issue 50937 - Update CLI for new backend split configuration

Description: In preparation for the move to LMDB the global database
configuration has been split into two (or more) entries
under cn=config. This patch changes how the gets/sets
work to make both of these entries appear as one
configuration unit. This is done by dynamically setting
the backend configuration entry dn with what is set in
nsslapd-backend-implement.

relates: https://pagure.io/389-ds-base/issue/50937

Reviewed by: spichugi, tbordaz, and firstyear(Thanks!!!)

Make changes via Simon's suggestions

Add firstyear's assert
---
src/lib389/lib389/_mapped_object.py | 25 +++++++-
src/lib389/lib389/backend.py | 111 +++++++++++++++++++++++++++++----
src/lib389/lib389/cli_conf/backend.py | 114 +++++++++++++++++-----------------
3 files changed, 178 insertions(+), 72 deletions(-)

diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index 5b4a2b3..ce0ebfe 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -7,7 +7,6 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

-import os
import ldap
import ldap.dn
from ldap import filter as ldap_filter
@@ -15,7 +14,7 @@ import logging
import json
from functools import partial
from lib389._entry import Entry
-from lib389._constants import DIRSRV_STATE_ONLINE, SER_ROOT_DN, SER_ROOT_PW
+from lib389._constants import DIRSRV_STATE_ONLINE
from lib389.utils import (
ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str,
ensure_list_int, display_log_value, display_log_data
@@ -245,7 +244,7 @@ class DSLdapObject(DSLogging):
raise ValueError("Invalid state. Cannot get presence on instance that is not ONLINE")
self._log.debug("%s present(%r) %s" % (self._dn, attr, value))

- e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
+ self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
serverctrls=self._server_controls, clientctrls=self._client_controls,
escapehatch='i am sure')[0]
values = self.get_attr_vals_bytes(attr)
@@ -589,6 +588,26 @@ class DSLdapObject(DSLogging):
# This could have unforseen consequences ...
return attrs_dict

+ def get_all_attrs_utf8(self, use_json=False):
+ """Get a dictionary having all the attributes of the entry
+
+ :returns: Dict with real attributes and operational attributes
+ """
+
+ self._log.debug("%s get_all_attrs" % (self._dn))
+ if self._instance.state != DIRSRV_STATE_ONLINE:
+ raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
+ else:
+ # retrieving real(*) and operational attributes(+)
+ attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter,
+ attrlist=["*", "+"], serverctrls=self._server_controls,
+ clientctrls=self._client_controls, escapehatch='i am sure')[0]
+ # getting dict from 'entry' object
+ r = {}
+ for (k, vo) in attrs_entry.data.items():
+ r[k] = ensure_list_str(vo)
+ return r
+
def get_attrs_vals(self, keys, use_json=False):
self._log.debug("%s get_attrs_vals(%r)" % (self._dn, keys))
if self._instance.state != DIRSRV_STATE_ONLINE:
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 81706dc..f13586e 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -11,7 +11,7 @@ import copy
import ldap
from lib389._constants import *
from lib389.properties import *
-from lib389.utils import normalizeDN, ensure_str, ensure_bytes
+from lib389.utils import normalizeDN, ensure_str, ensure_bytes, assert_c
from lib389 import Entry

# Need to fix this ....
@@ -26,7 +26,7 @@ from lib389.cos import (CosTemplates, CosIndirectDefinitions,
# We need to be a factor to the backend monitor
from lib389.monitor import MonitorBackend
from lib389.index import Index, Indexes, VLVSearches, VLVSearch
-from lib389.tasks import ImportTask, ExportTask, CleanAllRUVTask, Tasks
+from lib389.tasks import ImportTask, ExportTask, Tasks
from lib389.encrypted_attributes import EncryptedAttr, EncryptedAttrs


@@ -341,11 +341,11 @@ class BackendLegacy(object):

def getProperties(self, suffix=None, backend_dn=None, bename=None,
properties=None):
- raise NotImplemented
+ raise NotImplementedError

def setProperties(self, suffix=None, backend_dn=None, bename=None,
properties=None):
- raise NotImplemented
+ raise NotImplementedError

def toSuffix(self, entry=None, name=None):
'''
@@ -933,9 +933,12 @@ class Backends(DSLdapObjects):


class DatabaseConfig(DSLdapObject):
- """Chaining Default Config settings DSLdapObject with:
- - must attributes = ['cn']
- - RDN attribute is 'cn'
+ """Backend Database configuration
+
+ The entire database configuration consists of the main global configuration entry,
+ and the underlying DB library configuration: whether BDB or LMDB. The combined
+ configuration should be presented as a single entity so the end user does not need
+ to worry about what library is being used, and just focus on the configuration.

:param instance: An instance
:type instance: lib389.DirSrv
@@ -943,14 +946,96 @@ class DatabaseConfig(DSLdapObject):
:type dn: str
"""

- _must_attributes = ['cn']
-
- def __init__(self, instance, dn=None):
+ def __init__(self, instance, dn="cn=config,cn=ldbm database,cn=plugins,cn=config"):
super(DatabaseConfig, self).__init__(instance, dn)
self._rdn_attribute = 'cn'
self._must_attributes = ['cn']
+ self._global_attrs = [
+ 'nsslapd-lookthroughlimit',
+ 'nsslapd-mode',
+ 'nsslapd-idlistscanlimit',
+ 'nsslapd-directory',
+ 'nsslapd-import-cachesize',
+ 'nsslapd-idl-switch',
+ 'nsslapd-search-bypass-filter-test',
+ 'nsslapd-search-use-vlv-index',
+ 'nsslapd-exclude-from-export',
+ 'nsslapd-serial-lock',
+ 'nsslapd-subtree-rename-switch',
+ 'nsslapd-pagedlookthroughlimit',
+ 'nsslapd-pagedidlistscanlimit',
+ 'nsslapd-rangelookthroughlimit',
+ 'nsslapd-backend-opt-level',
+ 'nsslapd-backend-implement',
+ ]
+ self._db_attrs = {
+ 'bdb':
+ [
+ 'nsslapd-dbcachesize',
+ 'nsslapd-db-logdirectory',
+ 'nsslapd-db-home-directory',
+ 'nsslapd-db-durable-transaction',
+ 'nsslapd-db-transaction-wait',
+ 'nsslapd-db-checkpoint-interval',
+ 'nsslapd-db-compactdb-interval',
+ 'nsslapd-db-transaction-batch-val',
+ 'nsslapd-db-transaction-batch-min-wait',
+ 'nsslapd-db-transaction-batch-max-wait',
+ 'nsslapd-db-logbuf-size',
+ 'nsslapd-db-locks',
+ 'nsslapd-db-private-import-mem',
+ 'nsslapd-import-cache-autosize',
+ 'nsslapd-cache-autosize',
+ 'nsslapd-cache-autosize-split',
+ 'nsslapd-import-cachesize',
+ 'nsslapd-search-bypass-filter-test',
+ 'nsslapd-serial-lock',
+ 'nsslapd-db-deadlock-policy',
+ ],
+ 'lmdb': []
+ }
self._create_objectclasses = ['top', 'extensibleObject']
self._protected = True
- # Have to set cn=bdb, but when we can choose between bdb and lmdb we'll
- # have some hoops to jump through.
- self._dn = "cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config"
+ # This could be "bdb" or "lmdb", use what we have configured in the global config
+ self._db_lib = self.get_attr_val_utf8_l('nsslapd-backend-implement')
+ self._dn = "cn=config,cn=ldbm database,cn=plugins,cn=config"
+ self._db_dn = f"cn={self._db_lib},cn=config,cn=ldbm database,cn=plugins,cn=config"
+ self._globalObj = DSLdapObject(self._instance, dn=self._dn)
+ self._dbObj = DSLdapObject(self._instance, dn=self._db_dn)
+ # Assert there is no overlap in different config sets
+ assert_c(len(set(self._global_attrs).intersection(set(self._db_attrs['bdb']), set(self._db_attrs['lmdb']))) == 0)
+
+ def get(self):
+ """Get the combined config entries"""
+ # Get and combine both sets of attributes
+ global_attrs = self._globalObj.get_attrs_vals_utf8(self._global_attrs)
+ db_attrs = self._dbObj.get_attrs_vals_utf8(self._db_attrs[self._db_lib])
+ combined_attrs = {**global_attrs, **db_attrs}
+ return combined_attrs
+
+ def display(self):
+ """Display the combined configuration"""
+ global_attrs = self._globalObj.get_attrs_vals_utf8(self._global_attrs)
+ db_attrs = self._dbObj.get_attrs_vals_utf8(self._db_attrs[self._db_lib])
+ combined_attrs = {**global_attrs, **db_attrs}
+ for (k, vo) in combined_attrs.items():
+ if len(vo) == 0:
+ vo = ""
+ else:
+ vo = vo[0]
+ self._instance.log.info(f'{k}: {vo}')
+
+ def set(self, value_pairs):
+ for attr, val in value_pairs:
+ attr = attr.lower()
+ if attr in self._global_attrs:
+ global_config = DSLdapObject(self._instance, dn=self._dn)
+ global_config.replace(attr, val)
+ elif attr in self._db_attrs['bdb']:
+ db_config = DSLdapObject(self._instance, dn=self._db_dn)
+ db_config.replace(attr, val)
+ elif attr in self._db_attrs['lmdb']:
+ pass
+ else:
+ # Unknown attribute
+ raise ValueError("Can not update database configuration with unknown attribute: " + attr)
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
index 68e3893..75dfc5f 100644
--- a/src/lib389/lib389/cli_conf/backend.py
+++ b/src/lib389/lib389/cli_conf/backend.py
@@ -138,13 +138,13 @@ def backend_list(inst, basedn, log, args):

be_list.sort()
if args.json:
- print(json.dumps({"type": "list", "items": be_list}, indent=4))
+ log.info(json.dumps({"type": "list", "items": be_list}, indent=4))
else:
if len(be_list) > 0:
for be in be_list:
- print(be)
+ log.info(be)
else:
- print("No backends")
+ log.info("No backends")


def backend_get(inst, basedn, log, args):
@@ -200,7 +200,7 @@ def backend_create(inst, basedn, log, args):
# Unsupported rdn
raise ValueError("Suffix RDN is not supported for creating suffix object. Only 'dc', 'o', 'ou', and 'cn' are supported.")

- print("The database was sucessfully created")
+ log.info("The database was successfully created")


def _recursively_del_backends(be):
@@ -227,7 +227,7 @@ def backend_delete(inst, basedn, log, args, warn=True):
_recursively_del_backends(be)
be.delete()

- print("The database, and any sub-suffixes, were sucessfully deleted")
+ log.info("The database, and any sub-suffixes, were successfully deleted")


def backend_import(inst, basedn, log, args):
@@ -244,7 +244,7 @@ def backend_import(inst, basedn, log, args):
result = task.get_exit_code()

if task.is_complete() and result == 0:
- print("The import task has finished successfully")
+ log.info("The import task has finished successfully")
else:
raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))

@@ -272,7 +272,7 @@ def backend_export(inst, basedn, log, args):
result = task.get_exit_code()

if task.is_complete() and result == 0:
- print("The export task has finished successfully")
+ log.info("The export task has finished successfully")
else:
raise ValueError("Export task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))

@@ -329,15 +329,15 @@ def backend_get_subsuffixes(inst, basedn, log, args):
if len(subsuffixes) > 0:
subsuffixes.sort()
if args.json:
- print(json.dumps({"type": "list", "items": subsuffixes}, indent=4))
+ log.info(json.dumps({"type": "list", "items": subsuffixes}, indent=4))
else:
for sub in subsuffixes:
- print(sub)
+ log.info(sub)
else:
if args.json:
- print(json.dumps({"type": "list", "items": []}, indent=4))
+ log.info(json.dumps({"type": "list", "items": []}, indent=4))
else:
- print("No sub-suffixes under this backend")
+ log.info("No sub-suffixes under this backend")


def build_node(suffix, be_name, subsuf=False, link=False, replicated=False):
@@ -476,15 +476,15 @@ def backend_set(inst, basedn, log, args):
be.enable()
if args.disable:
be.disable()
- print("The backend configuration was sucessfully updated")
+ log.info("The backend configuration was successfully updated")


def db_config_get(inst, basedn, log, args):
db_cfg = DatabaseConfig(inst)
if args.json:
- print(db_cfg.get_all_attrs_json())
+ log.info(json.dumps({"type": "entry", "attrs": db_cfg.get()}, indent=4))
else:
- print(db_cfg.display())
+ db_cfg.display()


def db_config_set(inst, basedn, log, args):
@@ -498,17 +498,18 @@ def db_config_set(inst, basedn, log, args):
# We don't support deleting attributes or setting empty values in db
continue
else:
- replace_list.append((attr, value))
+ replace_list.append([attr, value])
if len(replace_list) > 0:
- db_cfg.replace_many(*replace_list)
+ db_cfg.set(replace_list)
elif not did_something:
raise ValueError("There are no changes to set in the database configuration")

- print("Successfully updated database configuration")
+ log.info("Successfully updated database configuration")
+

def _format_status(log, mtype, json=False):
if json:
- print(mtype.get_status_json())
+ log.info(mtype.get_status_json())
else:
status_dict = mtype.get_status()
log.info('dn: ' + mtype._dn)
@@ -517,6 +518,7 @@ def _format_status(log, mtype, json=False):
for vi in v:
log.info('{}: {}'.format(k, vi))

+
def get_monitor(inst, basedn, log, args):
if args.suffix is not None:
# Get a suffix/backend monitor entry
@@ -535,7 +537,7 @@ def get_monitor(inst, basedn, log, args):
def backend_add_index(inst, basedn, log, args):
be = _get_backend(inst, args.be_name)
be.add_index(args.attr, args.index_type, args.matching_rule, reindex=args.reindex)
- print("Successfully added index")
+ log.info("Successfully added index")


def backend_set_index(inst, basedn, log, args):
@@ -562,7 +564,7 @@ def backend_set_index(inst, basedn, log, args):

if args.reindex:
be.reindex(attrs=[args.attr])
- print("Index successfully updated")
+ log.info("Index successfully updated")


def backend_get_index(inst, basedn, log, args):
@@ -576,9 +578,9 @@ def backend_get_index(inst, basedn, log, args):
# Append decoded json object, because we are going to dump it later
results.append(json.loads(entry))
else:
- print(index.display())
+ log.info(index.display())
if args.json:
- print(json.dumps({"type": "list", "items": results}, indent=4))
+ log.info(json.dumps({"type": "list", "items": results}, indent=4))


def backend_list_index(inst, basedn, log, args):
@@ -593,25 +595,25 @@ def backend_list_index(inst, basedn, log, args):
results.append(json.loads(index.get_all_attrs_json()))
else:
if args.just_names:
- print(index.get_attr_val_utf8_l('cn'))
+ log.info(index.get_attr_val_utf8_l('cn'))
else:
- print(index.display())
+ log.info(index.display())

if args.json:
- print(json.dumps({"type": "list", "items": results}, indent=4))
+ log.info(json.dumps({"type": "list", "items": results}, indent=4))


def backend_del_index(inst, basedn, log, args):
be = _get_backend(inst, args.be_name)
for attr in args.attr:
be.del_index(attr)
- print("Successfully deleted index \"{}\"".format(attr))
+ log.info("Successfully deleted index \"{}\"".format(attr))


def backend_reindex(inst, basedn, log, args):
be = _get_backend(inst, args.be_name)
be.reindex(attrs=args.attr, wait=args.wait)
- print("Successfully reindexed database")
+ log.info("Successfully reindexed database")


def backend_attr_encrypt(inst, basedn, log, args):
@@ -622,16 +624,16 @@ def backend_attr_encrypt(inst, basedn, log, args):
for attr in args.add_attr:
be.add_encrypted_attr(attr)
if len(args.add_attr) > 1:
- print("Successfully added encrypted attributes")
+ log.info("Successfully added encrypted attributes")
else:
- print("Successfully added encrypted attribute")
+ log.info("Successfully added encrypted attribute")
if args.del_attr is not None:
for attr in args.del_attr:
be.del_encrypted_attr(attr)
if len(args.del_attr) > 1:
- print("Successfully deleted encrypted attributes")
+ log.info("Successfully deleted encrypted attributes")
else:
- print("Successfully deleted encrypted attribute")
+ log.info("Successfully deleted encrypted attribute")
if args.list:
results = be.get_encrypted_attrs(args.just_names)
if args.json:
@@ -641,17 +643,17 @@ def backend_attr_encrypt(inst, basedn, log, args):
else:
for result in results:
json_results.append(json.loads(result.get_all_attrs_json()))
- print(json.dumps({"type": "list", "items": json_results}, indent=4))
+ log.info(json.dumps({"type": "list", "items": json_results}, indent=4))

else:
if len(results) == 0:
- print("There are no encrypted attributes for this backend")
+ log.info("There are no encrypted attributes for this backend")
else:
for attr in results:
if args.just_names:
- print(attr)
+ log.info(attr)
else:
- print(attr.display())
+ log.info(attr.display())


def backend_list_vlv(inst, basedn, log, args):
@@ -675,24 +677,24 @@ def backend_list_vlv(inst, basedn, log, args):
results.append(entry)
else:
if args.just_names:
- print(vlv.get_attr_val_utf8_l('cn'))
+ log.info(vlv.get_attr_val_utf8_l('cn'))
else:
raw_entry = vlv.get_attrs_vals(VLV_SEARCH_ATTRS)
- print('dn: ' + vlv.dn)
+ log.info('dn: ' + vlv.dn)
for k, v in list(raw_entry.items()):
- print('{}: {}'.format(ensure_str(k), ensure_str(v[0])))
+ log.info('{}: {}'.format(ensure_str(k), ensure_str(v[0])))
indexes = vlv.get_sorts()
sorts = []
- print("Sorts:")
+ log.info("Sorts:")
for idx in indexes:
entry = idx.get_attrs_vals(VLV_INDEX_ATTRS)
- print(' - dn: ' + idx.dn)
+ log.info(' - dn: ' + idx.dn)
for k, v in list(entry.items()):
- print(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))
- print()
+ log.info(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))
+ log.info()

if args.json:
- print(json.dumps({"type": "list", "items": results}, indent=4))
+ log.info(json.dumps({"type": "list", "items": results}, indent=4))


def backend_get_vlv(inst, basedn, log, args):
@@ -707,9 +709,9 @@ def backend_get_vlv(inst, basedn, log, args):
results.append(json.loads(entry))
else:
raw_entry = vlv.get_attrs_vals(VLV_SEARCH_ATTRS)
- print('dn: ' + vlv._dn)
+ log.info('dn: ' + vlv._dn)
for k, v in list(raw_entry.items()):
- print('{}: {}'.format(ensure_str(k), ensure_str(v[0])))
+ log.info('{}: {}'.format(ensure_str(k), ensure_str(v[0])))
# Print indexes
indexes = vlv.get_sorts()
for idx in indexes:
@@ -718,14 +720,14 @@ def backend_get_vlv(inst, basedn, log, args):
results.append(json.loads(entry))
else:
raw_entry = idx.get_attrs_vals(VLV_INDEX_ATTRS)
- print('Sorts:')
- print(' - dn: ' + idx._dn)
+ log.info('Sorts:')
+ log.info(' - dn: ' + idx._dn)
for k, v in list(raw_entry.items()):
- print(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))
- print()
+ log.info(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))
+ log.info()

if args.json:
- print(json.dumps({"type": "list", "items": results}, indent=4))
+ log.info(json.dumps({"type": "list", "items": results}, indent=4))


def backend_create_vlv(inst, basedn, log, args):
@@ -735,7 +737,7 @@ def backend_create_vlv(inst, basedn, log, args):
'vlvscope': args.search_scope,
'vlvfilter': args.search_filter}
be.add_vlv_search(args.name, props)
- print("Successfully created new VLV Search entry, now you can add indexes to it.")
+ log.info("Successfully created new VLV Search entry, now you can add indexes to it.")


def backend_edit_vlv(inst, basedn, log, args):
@@ -757,14 +759,14 @@ def backend_edit_vlv(inst, basedn, log, args):
raise ValueError("There are no changes to set in the VLV search entry")
if args.reindex:
vlv_search.reindex()
- print("Successfully updated VLV search entry")
+ log.info("Successfully updated VLV search entry")


def backend_del_vlv(inst, basedn, log, args):
be = _get_backend(inst, args.be_name)
vlv_search = be.get_vlv_searches(vlv_name=args.name)
vlv_search.delete_all()
- print("Successfully deleted VLV search and its indexes")
+ log.info("Successfully deleted VLV search and its indexes")


def backend_create_vlv_index(inst, basedn, log, args):
@@ -773,14 +775,14 @@ def backend_create_vlv_index(inst, basedn, log, args):
vlv_search.add_sort(args.index_name, args.sort)
if args.index_it:
vlv_search.reindex(args.be_name, vlv_index=args.index_name)
- print("Successfully created new VLV index entry")
+ log.info("Successfully created new VLV index entry")


def backend_delete_vlv_index(inst, basedn, log, args):
be = _get_backend(inst, args.be_name)
vlv_search = be.get_vlv_searches(vlv_name=args.parent_name)
vlv_search.delete_sort(args.index_name, args.sort)
- print("Successfully deleted VLV index entry")
+ log.info("Successfully deleted VLV index entry")


def backend_reindex_vlv(inst, basedn, log, args):
@@ -788,7 +790,7 @@ def backend_reindex_vlv(inst, basedn, log, args):
suffix = be.get_suffix()
vlv_search = be.get_vlv_searches(vlv_name=args.parent_name)
vlv_search.reindex(suffix, vlv_index=args.index_name)
- print("Successfully reindexed VLV indexes")
+ log.info("Successfully reindexed VLV indexes")


def create_parser(subparsers):

--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
_______________________________________________
389-commits mailing list -- 389-commits@lists.fedoraproject.org
To unsubscribe send an email to 389-commits-leave@lists.fedoraproject.org
Fedora Code of Conduct: https://docs.fedoraproject.org/en-US/project/code-of-conduct/
List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines
List Archives: https://lists.fedoraproject.org/archives/list/389-commits@lists.fedoraproject.org

No comments:

Post a Comment