Thursday, April 7, 2016

[389-commits] 3 commits - dirsrvtests/tests ldap/servers

dirsrvtests/tests/suites/dna_plugin/dna_test.py | 169 +++++++++++-
dirsrvtests/tests/tickets/ticket48342_test.py | 322 ++++++++++++++++++++++++
ldap/servers/plugins/dna/dna.c | 178 ++++++++-----
3 files changed, 595 insertions(+), 74 deletions(-)

New commits:
commit 472a96b512f1b4cf6f5a0a2603c8abe77ba4c173
Author: William Brown <firstyear@redhat.com>
Date: Mon Apr 4 09:31:46 2016 +1000

Ticket 48342 - Prevent transaction abort if a transaction has not begun

Bug Description: Due to a logic issue in dna_update_config_event, a transaction
could be aborted even though it had not yet begun. Additionally, an operation
could fail without the transaction being aborted, and the transaction could
fail to start with the delete proceeding anyway.

Fix Description: Rearrange and correct the transaction logic in
dna_update_config_event. Because this code is always called during startup, it
does not have the same be-txn ordering issues as other areas of dna. This
should fix the transaction logic.

https://fedorahosted.org/389/ticket/48342

Author: wibrown

Review by: tbordaz (Thanks)
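
The fix description above boils down to the following ordering; this is a
minimal sketch, not the plugin's actual code, and do_shared_config_update() is
a hypothetical stand-in for the delete and recreate of the shared config entry:

#include <slapi-plugin.h>

/* Hypothetical stand-in for the real work: the plugin deletes the shared
 * config entry with slapi_delete_internal_pb() and recreates it with
 * dna_update_shared_config(). */
static int
do_shared_config_update(Slapi_PBlock *pb)
{
    (void)pb;
    return 0;
}

static void
update_shared_config_in_txn(Slapi_PBlock *dna_pb)
{
    int rc = slapi_back_transaction_begin(dna_pb);

    if (rc == 0) {
        /* Only touch the entry once the transaction has actually begun. */
        rc = do_shared_config_update(dna_pb);
        if (rc == 0) {
            slapi_back_transaction_commit(dna_pb);
        } else if (slapi_back_transaction_abort(dna_pb) != 0) {
            /* The abort itself failed; the real code logs this. */
        }
    } else {
        /* begin failed: skip the delete, and never abort a transaction
         * that was never started. */
    }
}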

diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index cac0051..2908443 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1603,35 +1603,35 @@ dna_update_config_event(time_t event_time, void *arg)
slapi_pblock_set(dna_pb, SLAPI_BACKEND, be);
/* We need to start transaction to avoid the deadlock */
rc = slapi_back_transaction_begin(dna_pb);
- if (rc) {
- slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
- "dna_update_config_event: failed to start transaction\n");
- }
- }
+ if (rc == 0) {

- /* First delete the existing shared config entry. This
- * will allow the entry to be updated for things like
- * port number changes, etc. */
- slapi_delete_internal_set_pb(pb, config_entry->shared_cfg_dn,
- NULL, NULL, getPluginID(), 0);
+ /* First delete the existing shared config entry. This
+ * will allow the entry to be updated for things like
+ * port number changes, etc. */
+ slapi_delete_internal_set_pb(pb, config_entry->shared_cfg_dn,
+ NULL, NULL, getPluginID(), 0);

- /* We don't care about the results */
- slapi_delete_internal_pb(pb);
+ /* We don't care about the results */
+ slapi_delete_internal_pb(pb);

- /* Now force the entry to be recreated */
- dna_update_shared_config(config_entry);
+ /* Now force the entry to be recreated */
+ rc = dna_update_shared_config(config_entry);

- if (dna_pb) {
- if (0 == rc) {
- slapi_back_transaction_commit(dna_pb);
- } else {
- if (slapi_back_transaction_abort(dna_pb) != 0) {
- slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_update_config_event: failed to abort transaction!\n");
+ if (0 == rc) {
+ slapi_back_transaction_commit(dna_pb);
+ } else {
+ if (slapi_back_transaction_abort(dna_pb) != 0) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_update_config_event: failed to abort transaction!\n");
+ }
}
+ slapi_pblock_destroy(dna_pb);
+ slapi_pblock_init(pb);
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_update_config_event: failed to start transaction\n");
}
- slapi_pblock_destroy(dna_pb);
}
- slapi_pblock_init(pb);
+
}

list = PR_NEXT_LINK(list);


commit eba93f74337bc2e1e3fd4d890a1a17db13588da1
Author: William Brown <firstyear@redhat.com>
Date: Fri Mar 18 13:40:46 2016 +1000

Ticket 48342 - DNA: deadlock during DNA_EXTEND_EXOP_REQUEST_OID

Bug Description: dna.c would deadlock during a range extension request.

This is because of a lock ordering issue. In normal operation, we would take:

* backend lock
* dna_lock

This is because *most* operations in dna are be_txn post operations.

However, when another replica requests a range, it calls the exop request
handler. The issue with this is that the exop handler is *not* a be_txn plugin;
in fact, exop plugins were never able to have a be_txn type. So the code would
take:

* dna_lock
* backend lock

This is how the deadlock starts. We have largely been lucky not to see this in
production before.

Fix Description: This consumes the new betxn RFE for extended operation plugins
(the exop is now registered as a betxnextendedop). This means the locks are
taken in a consistent order, preventing the deadlock.

https://fedorahosted.org/389/ticket/48342

Author: wibrown

Review by: tbordaz and nhosoi (Thanks)
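
To make the ordering problem above concrete, here is a minimal, generic sketch
of the resulting ABBA deadlock, using plain pthread mutexes to stand in for the
backend lock and dna_lock; it is illustrative only and is not the plugin code:

#include <pthread.h>

static pthread_mutex_t backend_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dna_lock = PTHREAD_MUTEX_INITIALIZER;

/* be_txn post-op path: backend lock first, then dna_lock. */
static void *
betxn_postop_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&backend_lock);
    pthread_mutex_lock(&dna_lock);     /* blocks if the exop thread holds dna_lock */
    /* ... assign the next value ... */
    pthread_mutex_unlock(&dna_lock);
    pthread_mutex_unlock(&backend_lock);
    return NULL;
}

/* Old exop path: dna_lock first, then the backend lock (opposite order). */
static void *
exop_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&dna_lock);
    pthread_mutex_lock(&backend_lock); /* blocks if the post-op thread holds it */
    /* ... extend the range ... */
    pthread_mutex_unlock(&backend_lock);
    pthread_mutex_unlock(&dna_lock);
    return NULL;
}

int
main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, betxn_postop_thread, NULL);
    pthread_create(&b, NULL, exop_thread, NULL);
    pthread_join(a, NULL); /* with unlucky timing, neither join ever returns */
    pthread_join(b, NULL);
    return 0;
}

With the betxnextendedop registration in the patch below, the exop handler runs
inside the backend transaction, so it too acquires the backend lock before
dna_lock and both paths use the same order.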

diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index b0ea2f4..cac0051 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -277,6 +277,7 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype);
static int dna_mod_pre_op(Slapi_PBlock * pb);
static int dna_add_pre_op(Slapi_PBlock * pb);
static int dna_extend_exop(Slapi_PBlock *pb);
+static int dna_extend_exop_backend(Slapi_PBlock *pb, Slapi_Backend **target);
static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype);
static int dna_be_txn_add_pre_op(Slapi_PBlock *pb);
static int dna_be_txn_mod_pre_op(Slapi_PBlock *pb);
@@ -483,7 +484,7 @@ dna_init(Slapi_PBlock *pb)

if ((status == DNA_SUCCESS) &&
/* the range extension extended operation */
- slapi_register_plugin("extendedop", /* op type */
+ slapi_register_plugin("betxnextendedop", /* op type */
1, /* Enabled */
"dna_init", /* this function desc */
dna_exop_init, /* init func for exop */
@@ -557,7 +558,9 @@ dna_exop_init(Slapi_PBlock * pb)
slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_OIDLIST,
(void *) dna_extend_exop_oid_list) != 0 ||
slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_FN,
- (void *) dna_extend_exop) != 0) {
+ (void *) dna_extend_exop) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_EXT_OP_BACKEND_FN,
+ (void *) dna_extend_exop_backend) != 0) {
slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
"dna_exop_init: failed to register plugin\n");
status = DNA_FAILURE;
@@ -699,6 +702,64 @@ dna_close(Slapi_PBlock * pb)
return DNA_SUCCESS;
}

+static int
+dna_parse_exop_ber(Slapi_PBlock *pb, char **shared_dn)
+{
+ int ret = -1; /* What's a better default? */
+ char *oid = NULL;
+ struct berval *reqdata = NULL;
+ BerElement *tmp_bere = NULL;
+
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
+ "----> dna_parse_exop_ber\n");
+
+ /* Fetch the request OID */
+ slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &oid);
+ if (!oid) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_exop_ber: Unable to retrieve request OID.\n");
+ goto out;
+ }
+
+ /* Make sure the request OID is correct. */
+ if (strcmp(oid, DNA_EXTEND_EXOP_REQUEST_OID) != 0) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_exop_ber: Received incorrect request OID.\n");
+ goto out;
+ }
+
+ /* Fetch the request data */
+ slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_VALUE, &reqdata);
+ if (!BV_HAS_DATA(reqdata)) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_exop_ber: No request data received.\n");
+ goto out;
+ }
+
+ /* decode the exop */
+ tmp_bere = ber_init(reqdata);
+ if (tmp_bere == NULL) {
+ goto out;
+ }
+
+ if (ber_scanf(tmp_bere, "{a}", shared_dn) == LBER_ERROR) {
+ ret = LDAP_PROTOCOL_ERROR;
+ goto out;
+ }
+
+ ret = LDAP_SUCCESS;
+
+out:
+ if (NULL != tmp_bere) {
+ ber_free(tmp_bere, 1);
+ tmp_bere = NULL;
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
+ "<---- dna_parse_exop_ber %s\n", *shared_dn);
+ return ret;
+}
+
/*
* Free the global linkedl ist of shared servers
*/
@@ -832,6 +893,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq)
* looking for valid ones. */
dna_parse_config_entry(pb, entries[i], 1);
}
+
dna_unlock();

if (use_eventq) {
@@ -1562,6 +1624,10 @@ dna_update_config_event(time_t event_time, void *arg)
if (dna_pb) {
if (0 == rc) {
slapi_back_transaction_commit(dna_pb);
+ } else {
+ if (slapi_back_transaction_abort(dna_pb) != 0) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_update_config_event: failed to abort transaction!\n");
+ }
}
slapi_pblock_destroy(dna_pb);
}
@@ -4244,16 +4310,41 @@ static int dna_config_check_post_op(Slapi_PBlock * pb)


/****************************************************
+ * Pre Extended Operation, Backend selection
+ ***************************************************/
+static int dna_extend_exop_backend(Slapi_PBlock *pb, Slapi_Backend **target)
+{
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
+ "--> dna_parse_exop_backend\n");
+ Slapi_DN *shared_sdn = NULL;
+ char *shared_dn = NULL;
+ int res = -1;
+ /* Parse the oid and what exop wants us to do */
+ res = dna_parse_exop_ber(pb, &shared_dn);
+ if (res != LDAP_SUCCESS) {
+ return res;
+ }
+ if (shared_dn) {
+ shared_sdn = slapi_sdn_new_dn_byref(shared_dn);
+ *target = slapi_be_select(shared_sdn);
+ slapi_sdn_free(&shared_sdn);
+ }
+ res = LDAP_SUCCESS;
+
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
+ "<-- dna_parse_exop_backend %d\n", res);
+ return res;
+}
+
+
+/****************************************************
* Range Extension Extended Operation
***************************************************/
static int dna_extend_exop(Slapi_PBlock *pb)
{
int ret = SLAPI_PLUGIN_EXTENDED_NOT_HANDLED;
- struct berval *reqdata = NULL;
- BerElement *tmp_bere = NULL;
char *shared_dn = NULL;
char *bind_dn = NULL;
- char *oid = NULL;
PRUint64 lower = 0;
PRUint64 upper = 0;

@@ -4264,38 +4355,8 @@ static int dna_extend_exop(Slapi_PBlock *pb)
slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
"--> dna_extend_exop\n");

- /* Fetch the request OID */
- slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &oid);
- if (!oid) {
- slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
- "dna_extend_exop: Unable to retrieve request OID.\n");
- goto free_and_return;
- }
-
- /* Make sure the request OID is correct. */
- if (strcmp(oid, DNA_EXTEND_EXOP_REQUEST_OID) != 0) {
- slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
- "dna_extend_exop: Received incorrect request OID.\n");
- goto free_and_return;
- }
-
- /* Fetch the request data */
- slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_VALUE, &reqdata);
- if (!BV_HAS_DATA(reqdata)) {
- slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
- "dna_extend_exop: No request data received.\n");
- goto free_and_return;
- }
-
- /* decode the exop */
- tmp_bere = ber_init(reqdata);
- if (tmp_bere == NULL) {
- goto free_and_return;
- }
-
- if (ber_scanf(tmp_bere, "{a}", &shared_dn) == LBER_ERROR) {
- ret = LDAP_PROTOCOL_ERROR;
- goto free_and_return;
+ if(dna_parse_exop_ber(pb, &shared_dn) != LDAP_SUCCESS) {
+ return ret;
}

slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM,
@@ -4365,10 +4426,6 @@ static int dna_extend_exop(Slapi_PBlock *pb)
free_and_return:
slapi_ch_free_string(&shared_dn);
slapi_ch_free_string(&bind_dn);
- if (NULL != tmp_bere) {
- ber_free(tmp_bere, 1);
- tmp_bere = NULL;
- }

slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
"<-- dna_extend_exop\n");
@@ -4530,6 +4587,7 @@ dna_release_range(char *range_dn, PRUint64 *lower, PRUint64 *upper)
if (ret == LDAP_SUCCESS) {
/* Adjust maxval in our cached config and shared config */
config_entry->maxval = *lower - 1;
+ /* This is within the dna_lock, so okay */
dna_notice_allocation(config_entry, config_entry->nextval, 0);
}
}


commit 1066c9ddf7b75b8aef2b1fff1744a6091fa92377
Author: William Brown <firstyear@redhat.com>
Date: Fri Mar 18 13:34:56 2016 +1000

Ticket 48342 - DNA Deadlock test cases

Bug Description: The DNA plugin can deadlock when an extended operation occurs
at the same time as the plugin performs a be_txn_post operation.

Fix Description: This provides test cases to reproduce that issue and adds a
basic DNA test case.

https://fedorahosted.org/389/ticket/48342

Author: tbordaz

Review by: wibrown

diff --git a/dirsrvtests/tests/suites/dna_plugin/dna_test.py b/dirsrvtests/tests/suites/dna_plugin/dna_test.py
index 6b0ab8b..e6fb745 100644
--- a/dirsrvtests/tests/suites/dna_plugin/dna_test.py
+++ b/dirsrvtests/tests/suites/dna_plugin/dna_test.py
@@ -22,8 +22,18 @@ from lib389.utils import *
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

-installation1_prefix = None
-
+USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
+USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
+USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX
+BUSER1_DN = 'uid=user1,ou=branch1,' + DEFAULT_SUFFIX
+BUSER2_DN = 'uid=user2,ou=branch2,' + DEFAULT_SUFFIX
+BUSER3_DN = 'uid=user3,ou=branch2,' + DEFAULT_SUFFIX
+BRANCH1_DN = 'ou=branch1,' + DEFAULT_SUFFIX
+BRANCH2_DN = 'ou=branch2,' + DEFAULT_SUFFIX
+GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX
+PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX
+GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX
+CONFIG_AREA = 'nsslapd-pluginConfigArea'

class TopologyStandalone(object):
def __init__(self, standalone):
@@ -33,10 +43,6 @@ class TopologyStandalone(object):

@pytest.fixture(scope="module")
def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
# Creating standalone instance ...
standalone = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_STANDALONE
@@ -51,6 +57,16 @@ def topology(request):
standalone.create()
standalone.open()

+ # Delete each instance in the end
+ def fin():
+ # This is useful for analysing the test env.
+ standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \
+ repl_data=True, outputfile='%s/ldif/%s.ldif' % (standalone.dbdir,SERVERID_STANDALONE ))
+ standalone.clearBackupFS()
+ standalone.backupFS()
+ standalone.delete()
+ request.addfinalizer(fin)
+
# Clear out the tmp dir
standalone.clearTmpDir(__file__)

@@ -70,18 +86,143 @@ def test_dna_(topology):
Write a single test here...
'''

- return
-
+ # stop the plugin, and start it
+ topology.standalone.plugins.disable(name=PLUGIN_DNA)
+ topology.standalone.plugins.enable(name=PLUGIN_DNA)
+
+ CONFIG_DN = 'cn=config,cn=' + PLUGIN_DNA + ',cn=plugins,cn=config'
+
+ log.info('Testing ' + PLUGIN_DNA + '...')
+
+ ############################################################################
+ # Configure plugin
+ ############################################################################
+
+ try:
+ topology.standalone.add_s(Entry((CONFIG_DN, {
+ 'objectclass': 'top dnaPluginConfig'.split(),
+ 'cn': 'config',
+ 'dnatype': 'uidNumber',
+ 'dnafilter': '(objectclass=top)',
+ 'dnascope': DEFAULT_SUFFIX,
+ 'dnaMagicRegen': '-1',
+ 'dnaMaxValue': '50000',
+ 'dnaNextValue': '1'
+ })))
+ except ldap.ALREADY_EXISTS:
+ try:
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'),
+ (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')])
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Failed to set the DNA plugin: error ' + e.message['desc'])
+ assert False
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Failed to add config entry: error ' + e.message['desc'])
+ assert False
+
+ # Do we need to restart for the plugin?
+
+ topology.standalone.restart()
+
+ ############################################################################
+ # Test plugin
+ ############################################################################
+
+ try:
+ topology.standalone.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Failed to user1: error ' + e.message['desc'])
+ assert False
+
+ # See if the entry now has the new uidNumber assignment - uidNumber=1
+ try:
+ entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=1)')
+ if not entries:
+ log.fatal('test_dna: user1 was not updated - (looking for uidNumber: 1)')
+ assert False
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ # Test the magic regen value
+ try:
+ topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')])
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
+ assert False
+
+ # See if the entry now has the new uidNumber assignment - uidNumber=2
+ try:
+ entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=2)')
+ if not entries:
+ log.fatal('test_dna: user1 was not updated (looking for uidNumber: 2)')
+ assert False
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ ################################################################################
+ # Change the config
+ ################################################################################
+
+ try:
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')])
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Failed to set the magic reg value to -2: error ' + e.message['desc'])
+ assert False
+
+ ################################################################################
+ # Test plugin
+ ################################################################################
+
+ # Test the magic regen value
+ try:
+ topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')])
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
+ assert False
+
+ # See if the entry now has the new uidNumber assignment - uidNumber=3
+ try:
+ entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=3)')
+ if not entries:
+ log.fatal('test_dna: user1 was not updated (looking for uidNumber: 3)')
+ assert False
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
+ # Test plugin dependency
+ ############################################################################
+
+ #test_dependency(inst, PLUGIN_AUTOMEMBER)
+
+ ############################################################################
+ # Cleanup
+ ############################################################################
+
+ try:
+ topology.standalone.delete_s(USER1_DN)
+ except ldap.LDAPError as e:
+ log.fatal('test_dna: Failed to delete test entry1: ' + e.message['desc'])
+ assert False
+
+ topology.standalone.plugins.disable(name=PLUGIN_DNA)
+
+ ############################################################################
+ # Test passed
+ ############################################################################
+
+ log.info('test_dna: PASS\n')

-def test_dna_final(topology):
- topology.standalone.delete()
- log.info('dna test suite PASSED')
+ return


def run_isolated():
- global installation1_prefix
- installation1_prefix = None
-
topo = topology(True)
test_dna_init(topo)
test_dna_(topo)
diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py
new file mode 100644
index 0000000..104a938
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket48342_test.py
@@ -0,0 +1,322 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+PEOPLE_OU='people'
+PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX)
+MAX_ACCOUNTS=5
+
+class TopologyReplication(object):
+ def __init__(self, master1, master2, master3):
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
+ master3.open()
+ self.master3 = master3
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ global installation1_prefix
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+ # Creating master 1...
+ master1 = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ master2 = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+ # Creating master 3...
+ master3 = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_MASTER_3
+ args_instance[SER_PORT] = PORT_MASTER_3
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master3.allocate(args_master)
+ instance_master3 = master3.exists()
+ if instance_master3:
+ master3.delete()
+ master3.create()
+ master3.open()
+ master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_3)
+
+ #
+ # Create all the agreements
+ #
+ # Creating agreement from master 1 to master 2
+ properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 1 to master 3
+# properties = {RA_NAME: r'meTo_$host:$port',
+# RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+# RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+# RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+# RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+# m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
+# if not m1_m3_agmt:
+# log.fatal("Fail to create a master -> master replica agreement")
+# sys.exit(1)
+# log.debug("%s created" % m1_m3_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Creating agreement from master 2 to master 3
+ properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
+ if not m2_m3_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m3_agmt)
+
+ # Creating agreement from master 3 to master 1
+# properties = {RA_NAME: r'meTo_$host:$port',
+# RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+# RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+# RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+# RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+# m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+# if not m3_m1_agmt:
+# log.fatal("Fail to create a master -> master replica agreement")
+# sys.exit(1)
+# log.debug("%s created" % m3_m1_agmt)
+
+ # Creating agreement from master 3 to master 2
+ properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+ if not m3_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_m2_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ #
+ # Initialize all the agreements
+ #
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+ time.sleep(5) # just to be safe
+ master2.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
+ master2.waitForReplInit(m2_m3_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, master2):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ assert False
+
+ # Delete each instance in the end
+ def fin():
+ for master in (master1, master2, master3):
+ # master.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \
+ # repl_data=True, outputfile='%s/ldif/%s.ldif' % (master.dbdir,SERVERID_STANDALONE ))
+ # master.clearBackupFS()
+ # master.backupFS()
+ master.delete()
+ request.addfinalizer(fin)
+
+ # Clear out the tmp dir
+ master1.clearTmpDir(__file__)
+
+ return TopologyReplication(master1, master2, master3)
+
+def _dna_config(server, nextValue=500, maxValue=510):
+ log.info("Add dna plugin config entry...%s" % server)
+
+ try:
+ server.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', {
+ 'objectclass': 'top dnaPluginConfig'.split(),
+ 'dnaType': 'description',
+ 'dnaMagicRegen': '-1',
+ 'dnaFilter': '(objectclass=posixAccount)',
+ 'dnaScope': 'ou=people,%s' % SUFFIX,
+ 'dnaNextValue': str(nextValue),
+ 'dnaMaxValue' : str(nextValue+maxValue),
+ 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
+ })))
+
+ except ldap.LDAPError as e:
+ log.error('Failed to add DNA config entry: error ' + e.message['desc'])
+ assert False
+
+ log.info("Enable the DNA plugin...")
+ try:
+ server.plugins.enable(name=PLUGIN_DNA)
+ except e:
+ log.error("Failed to enable DNA Plugin: error " + e.message['desc'])
+ assert False
+
+ log.info("Restarting the server...")
+ server.stop(timeout=120)
+ time.sleep(1)
+ server.start(timeout=120)
+ time.sleep(3)
+
+def test_ticket4026(topology):
+ """Write your replication testcase here.
+
+ To access each DirSrv instance use: topology.master1, topology.master2,
+ ..., topology.hub1, ..., topology.consumer1, ...
+
+ Also, if you need any testcase initialization,
+ please, write additional fixture for that(include finalizer).
+ """
+
+ try:
+ topology.master1.add_s(Entry((PEOPLE_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'ou': 'people'})))
+ except ldap.ALREADY_EXISTS:
+ pass
+
+ topology.master1.add_s(Entry(('ou=ranges,' + SUFFIX, {
+ 'objectclass': 'top organizationalunit'.split(),
+ 'ou': 'ranges'
+ })))
+ for cpt in range(MAX_ACCOUNTS):
+ name = "user%d" % (cpt)
+ topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
+
+ # make master3 having more free slots that master2
+ # so master1 will contact master3
+ _dna_config(topology.master1, nextValue=100, maxValue=10)
+ _dna_config(topology.master2, nextValue=200, maxValue=10)
+ _dna_config(topology.master3, nextValue=300, maxValue=3000)
+
+ # Turn on lots of error logging now.
+
+ mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')]
+ #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')]
+ topology.master1.modify_s('cn=config', mod)
+ topology.master2.modify_s('cn=config', mod)
+ topology.master3.modify_s('cn=config', mod)
+
+ # We need to wait for the event in dna.c to fire to start the servers
+ # see dna.c line 899
+ time.sleep(60)
+
+ # add on master1 users with description DNA
+ for cpt in range(10):
+ name = "user_with_desc1_%d" % (cpt)
+ topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'description' : '-1',
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
+ # give time to negociate master1 <--> master3
+ time.sleep(10)
+ # add on master1 users with description DNA
+ for cpt in range(11,20):
+ name = "user_with_desc1_%d" % (cpt)
+ topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'description' : '-1',
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
+ log.info('Test complete')
+ # add on master1 users with description DNA
+ mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')]
+ #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')]
+ topology.master1.modify_s('cn=config', mod)
+ topology.master2.modify_s('cn=config', mod)
+ topology.master3.modify_s('cn=config', mod)
+
+ log.info('Test complete')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+# global installation1_prefix
+# installation1_prefix=None
+# topo = topology(True)
+# test_ticket4026(topo)
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)

--
389 commits mailing list
389-commits@lists.fedoraproject.org
http://lists.fedoraproject.org/admin/lists/389-commits@lists.fedoraproject.org
