Wednesday, June 8, 2016

[389-commits] Branch '389-ds-base-1.3.4' - 2 commits - dirsrvtests/tests ldap/admin ldap/ldif ldap/servers Makefile.am Makefile.in

Makefile.am | 3
Makefile.in | 3
dirsrvtests/tests/tickets/ticket48755_test.py | 222 ++++++++++++++++
ldap/admin/src/scripts/91reindex.pl.in | 103 +++++++
ldap/admin/src/scripts/91subtreereindex.pl | 8
ldap/admin/src/scripts/setup-ds.res.in | 1
ldap/ldif/template-dse.ldif.in | 1
ldap/servers/plugins/replication/repl5_replica_config.c | 7
ldap/servers/plugins/replication/repl5_tot_protocol.c | 169 ++++++++----
ldap/servers/slapd/back-ldbm/back-ldbm.h | 6
ldap/servers/slapd/back-ldbm/dblayer.c | 5
ldap/servers/slapd/back-ldbm/filterindex.c | 18 -
ldap/servers/slapd/back-ldbm/idl_new.c | 87 +++++-
ldap/servers/slapd/back-ldbm/index.c | 22 +
ldap/servers/slapd/back-ldbm/init.c | 2
ldap/servers/slapd/back-ldbm/misc.c | 1
ldap/servers/slapd/entry.c | 4
ldap/servers/slapd/slap.h | 4
ldap/servers/slapd/slapi-plugin.h | 8
19 files changed, 588 insertions(+), 86 deletions(-)

New commits:
commit 80893ff5f6158ee5b5fdefde250fe22acc5d5c30
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu May 26 14:40:52 2016 -0700

Ticket #48755 - CI test: test case for ticket 48755

Description: moving an entry could make the online init fail
(cherry picked from commit e218a187678133455c4138481c825852e099298a)

diff --git a/dirsrvtests/tests/tickets/ticket48755_test.py b/dirsrvtests/tests/tickets/ticket48755_test.py
new file mode 100644
index 0000000..e3b7b61
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket48755_test.py
@@ -0,0 +1,222 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import sys
+import time
+import shlex
+import subprocess
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+m1_m2_agmt = None
+
+class TopologyReplication(object):
+ def __init__(self, master1, master2):
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ global installation1_prefix
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+ # Creating master 1...
+ master1 = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ master2 = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+ #
+ # Create all the agreements
+ #
+ # Creating agreement from master 1 to master 2
+ properties = {RA_NAME: r'meTo_%s:%s' % (master2.host, master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ global m1_m2_agmt
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_NAME: r'meTo_%s:%s' % (master1.host, master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ #
+ # Initialize all the agreements
+ #
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, master2):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ assert False
+
+ # Delete each instance in the end
+ def fin():
+ master1.delete()
+ master2.delete()
+ request.addfinalizer(fin)
+
+ # Clear out the tmp dir
+ master1.clearTmpDir(__file__)
+
+ return TopologyReplication(master1, master2)
+
+
+@pytest.fixture(scope="module")
+
+def add_ou_entry(server, idx, myparent):
+ name = 'OU%d' % idx
+ dn = 'ou=%s,%s' % (name, myparent)
+ server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'],
+ 'ou': name})))
+
+def add_user_entry(server, idx, myparent):
+ name = 'tuser%d' % idx
+ dn = 'uid=%s,%s' % (name, myparent)
+ server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
+ 'givenname': 'test',
+ 'sn': 'user%d' % idx,
+ 'cn': 'Test User%d' % idx,
+ 'userpassword': 'password'})))
+
+def test_ticket48755(topology):
+ log.info("Ticket 48755 - moving an entry could make the online init fail")
+
+ M1 = topology.master1
+ M2 = topology.master2
+
+ log.info("Generating DIT_0")
+ idx = 0
+ add_ou_entry(M1, idx, DEFAULT_SUFFIX)
+
+ ou0 = 'ou=OU%d' % idx
+ parent0 = '%s,%s' % (ou0, DEFAULT_SUFFIX)
+ add_ou_entry(M1, idx, parent0)
+
+ parent00 = 'ou=OU%d,%s' % (idx, parent0)
+ for idx in range(0, 9):
+ add_user_entry(M1, idx, parent00)
+
+ log.info('%s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, parent0, parent00))
+
+ log.info("Generating DIT_1")
+ idx = 1
+ add_ou_entry(M1, idx, DEFAULT_SUFFIX)
+
+ parent1 = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX)
+ add_ou_entry(M1, idx, parent1)
+
+ log.info("Moving %s to DIT_1" % parent00)
+ M1.rename_s(parent00, ou0, newsuperior=parent1, delold=1)
+
+ log.info("Moving %s to DIT_1" % parent0)
+ parent01 = '%s,%s' % (ou0, parent1)
+ M1.rename_s(parent0, ou0, newsuperior=parent01, delold=1)
+
+ parent001 = '%s,%s' % (ou0, parent01)
+ log.info("Moving USERS to %s" % parent0)
+ for idx in range(0, 9):
+ name = 'tuser%d' % idx
+ rdn = 'uid=%s' % name
+ dn = 'uid=%s,%s' % (name, parent01)
+ M1.rename_s(dn, rdn, newsuperior=parent001, delold=1)
+
+ log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, parent1, parent01, parent001))
+
+ log.info("Deleting 5 USERS to turn them into a tombstone entries")
+ for idx in range(5, 9):
+ name = 'tuser%d' % idx
+ rdn = 'uid=%s' % name
+ dn = 'uid=%s,%s' % (name, parent001)
+ M1.delete_s(dn)
+
+ log.info("Run Consumer Initialization.")
+ global m1_m2_agmt
+ M1.startReplication_async(m1_m2_agmt)
+ M1.waitForReplInit(m1_m2_agmt)
+
+ m1entries = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
+ m2entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
+
+ log.info("m1entry count - %d", len(m1entries))
+ log.info("m2entry count - %d", len(m2entries))
+
+ assert len(m1entries) == len(m2entries)
+ log.info('PASSED')
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)


commit 4e70ab58808cae57642f459ff00a298e69265e08
Author: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu May 26 15:14:06 2016 -0700

Ticket #48755 - moving an entry could make the online init fail

Bug Description: Online init (aka total update, bulk import) scans the
primary id2entry db in ID order. If Entry A is moved under a new
superior Entry B that was created after Entry A, Entry B has a larger
entry ID than Entry A; when Entry A is sent to a consumer during online
init, its parent entry does not yet exist on the consumer and the
online init fails.
Fix Description:
- Added a command BACK_INFO_IS_ENTRYRDN to slapi_back_get_info, which
returns the status of the entryrdn switch maintained in the backend.

- If slapi_back_get_info(BACK_INFO_IS_ENTRYRDN) returns true for
the replicated backend, repl5_tot_run searches the entries with the
filter:
(|(parentid>=1)(objectclass=ldapsubentry)(objectclass=nstombstone))
instead of:
(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*)).

- In addition, idl_new_range_fetch had to be modified so that ...
* A range search on parentid ignores nsslapd-idlistscanlimit by
setting SLAPI_OP_RANGE_NO_ALLIDS, and skips sorting the IDList by ID
by setting SLAPI_OP_RANGE_NO_IDL_SORT.
* When SLAPI_OP_RANGE_NO_IDL_SORT is set, idl_new_range_fetch checks
whether the key (in this case, the parentid) is already in the IDList.
If it is, the ID is appended; if not, the ID is kept on a leftover
list and appended once its parent ID appears in the IDList. (A small
Python model of this bookkeeping follows this list.)

- Increased the version of rdn-format-# in DBVERSION to 3.

- Added the upgrade script 91reindex.pl.in, which reindexes the
parentid index with integer ordering if the rdn-format-# version in
DBVERSION is less than 3.
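
To make the SLAPI_OP_RANGE_NO_IDL_SORT bookkeeping concrete, here is a small
Python model of the behaviour described above (an assumption-level sketch of
the ordering rule, not the C code in idl_new_range_fetch()); the input pairs
stand for the (parentid key, ID) values the parentid>=1 range scan yields,
grouped by parentid in integer order:

    def range_fetch_no_idl_sort(pairs, suffix_id):
        idl = []               # IDs in the order they will be bulk-imported
        leftovers = []         # (parentid, id) whose parent was not seen yet
        seen = {suffix_id}     # the suffix entry is sent before this scan

        for parentid, eid in pairs:
            if parentid in seen:
                idl.append(eid)
                seen.add(eid)
            else:
                leftovers.append((parentid, eid))
            # flush any leftover whose parent has now been emitted
            flushed = True
            while flushed:
                flushed = False
                for pid, lid in list(leftovers):
                    if pid in seen:
                        idl.append(lid)
                        seen.add(lid)
                        leftovers.remove((pid, lid))
                        flushed = True
        return idl

    # Entry 5 was re-parented under entry 9, which has a larger ID than 5.
    pairs = [(1, 9), (1, 4), (5, 7), (9, 5)]
    print(range_fetch_no_idl_sort(pairs, suffix_id=1))  # [9, 4, 5, 7]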

https://fedorahosted.org/389/ticket/48755

Reviewed by wibrown@redhat.com and lkrispen@redhat.com (Thanks, William and Ludwig!)

(cherry picked from commit 3606b78bacce984ab2226755c5921dffac9552c2)

diff --git a/Makefile.am b/Makefile.am
index 8dcdb36..2d19a74 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -589,7 +589,8 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \
ldap/admin/src/scripts/50AES-pbe-plugin.ldif\
ldap/admin/src/scripts/50updateconfig.ldif \
ldap/admin/src/scripts/52updateAESplugin.pl \
- ldap/admin/src/scripts/dnaplugindepends.ldif
+ ldap/admin/src/scripts/dnaplugindepends.ldif \
+ ldap/admin/src/scripts/91reindex.pl

update_SCRIPTS = ldap/admin/src/scripts/exampleupdate.sh

diff --git a/Makefile.in b/Makefile.in
index a29509d..4298c57 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1997,7 +1997,8 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \
ldap/admin/src/scripts/50AES-pbe-plugin.ldif\
ldap/admin/src/scripts/50updateconfig.ldif \
ldap/admin/src/scripts/52updateAESplugin.pl \
- ldap/admin/src/scripts/dnaplugindepends.ldif
+ ldap/admin/src/scripts/dnaplugindepends.ldif \
+ ldap/admin/src/scripts/91reindex.pl

update_SCRIPTS = ldap/admin/src/scripts/exampleupdate.sh

diff --git a/ldap/admin/src/scripts/91reindex.pl.in b/ldap/admin/src/scripts/91reindex.pl.in
new file mode 100644
index 0000000..c861f64
--- /dev/null
+++ b/ldap/admin/src/scripts/91reindex.pl.in
@@ -0,0 +1,103 @@
+use Mozilla::LDAP::Conn;
+use Mozilla::LDAP::Utils qw(normalizeDN);
+use Mozilla::LDAP::API qw(:constant ldap_url_parse ldap_explode_dn);
+use DSUpdate qw(isOffline);
+
+sub runinst {
+ my ($inf, $inst, $dseldif, $conn) = @_;
+ my $rc, @errs;
+
+ # List of index to be reindexed
+ my @toreindex = qw(parentid);
+ # rdn-format value. See $rdn_format set below.
+ # If equal to or greater than this value, no need to reindex.
+ # If it needs to be unconditionally reindexed, set 0.
+ my @rdnconditions = (4);
+
+ my $config = $conn->search("cn=config", "base", "(objectclass=*)");
+ if (!$config) {
+ push @errs, ['error_finding_config_entry', 'cn=config',
+ $conn->getErrorString()];
+ return @errs;
+ }
+
+ ($rc, @errs) = isOffline($inf, $inst, $conn);
+ if (!$rc) {
+ return @errs;
+ }
+
+ my $reindex = "@sbindir@/db2index -Z $inst";
+ my @errs;
+ my $instconf = $conn->search("cn=ldbm database,cn=plugins,cn=config", "onelevel", "(objectclass=*)");
+ if (!$instconf) {
+ push @errs, ['error_finding_config_entry', 'cn=*,cn=ldbm database,cn=plugins,cn=config', $conn->getErrorString()];
+ return @errs;
+ }
+
+ my $dbconf = $conn->search("cn=config,cn=ldbm database,cn=plugins,cn=config", "base", "(objectclass=*)");
+ if (!$dbconf) {
+ push @errs, ['error_finding_config_entry',
+ 'cn=config,cn=ldbm database,cn=plugins,cn=config',
+ $conn->getErrorString()];
+ return @errs;
+ }
+
+ # Get the value of nsslapd-subtree-rename-switch.
+ my $switch = $dbconf->getValues('nsslapd-subtree-rename-switch');
+ if ("" eq $switch) {
+ return (); # subtree-rename-switch does not exist; do nothing.
+ } elsif ("off" eq $switch || "OFF" eq $switch) {
+ return (); # subtree-rename-switch is OFF; do nothing.
+ }
+
+ my $dbdir = $dbconf->getValues('nsslapd-directory');
+ my $dbversion0 = $dbdir . "/DBVERSION";
+ my $rdn_format = 0;
+ my $dbversionstr = "";
+ if (!open(DBVERSION, "$dbversion0")) {
+ push @errs, ['error_opening_file', $dbversion0, $!];
+ return @errs;
+ } else {
+ while (<DBVERSION>) {
+ if ($_ =~ /rdn-format/) {
+ $rdn_format = 1;
+ $dbversionstr = $_;
+ if ($_ =~ /rdn-format-1/) {
+ $rdn_format = 2;
+ } elsif ($_ =~ /rdn-format-2/) {
+ $rdn_format = 3;
+ } elsif ($_ =~ /rdn-format-3/) {
+ $rdn_format = 4;
+ } elsif ($_ =~ /rdn-format-4/) {
+ $rdn_format = 5;
+ } elsif ($_ =~ /rdn-format-5/) {
+ $rdn_format = 6;
+ } elsif ($_ =~ /rdn-format-/) {
+ # assume greater than -5
+ $rdn_format = 7;
+ }
+ }
+ }
+ close DBVERSION;
+ }
+
+ while ($instconf) {
+ my $backend= $instconf->getValues('cn');
+ if (($backend eq "config") || ($backend eq "monitor")) {
+ goto NEXT;
+ }
+
+ for (my $idx = 0; $idx <= $#toreindex; $idx++) {
+ if (0 == $rdnconditions[$idx] || $rdnconditions[$idx] > $rdn_format) {
+ my $rc = system("$reindex -n $backend -t $idx");
+ if ($rc) {
+ push @errs, ["error_reindexng", $idx, $backend, $rc];
+ }
+ }
+ }
+NEXT:
+ $instconf = $conn->nextEntry();
+ }
+
+ return @errs;
+}
diff --git a/ldap/admin/src/scripts/91subtreereindex.pl b/ldap/admin/src/scripts/91subtreereindex.pl
index a031cc1..c4b40a3 100644
--- a/ldap/admin/src/scripts/91subtreereindex.pl
+++ b/ldap/admin/src/scripts/91subtreereindex.pl
@@ -51,14 +51,18 @@ sub runinst {
if ($_ =~ /rdn-format-1/) {
$is_rdn_format = 2;
}
- if ($_ =~ /rdn-format-2/) {
+ elsif ($_ =~ /rdn-format-2/) {
$is_rdn_format = 3;
}
+ elsif ($_ =~ /rdn-format-/) {
+ # assume greater than -2
+ $is_rdn_format = 4;
+ }
}
}
close DBVERSION;

- if (3 == $is_rdn_format) {
+ if (3 <= $is_rdn_format) {
return (); # DB already has the new rdn format.
}

diff --git a/ldap/admin/src/scripts/setup-ds.res.in b/ldap/admin/src/scripts/setup-ds.res.in
index 7134e25..760db6f 100644
--- a/ldap/admin/src/scripts/setup-ds.res.in
+++ b/ldap/admin/src/scripts/setup-ds.res.in
@@ -208,3 +208,4 @@ error_opening_file = Opening file '%s' failed. Error: %s\n
error_format_error = '%s' has invalid format.\n
error_update_not_offline = Error: offline mode selected but the server [%s] is still running.\n
error_update_all = Failed to update all the Directory Server instances.\n
+error_reindexing = Failed to reindex '%s' in backend '%s'. Error: %s\n
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 6acbfae..2988cb9 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -927,6 +927,7 @@ objectclass: nsIndex
cn: parentid
nssystemindex: true
nsindextype: eq
+nsmatchingrule: integerOrderingMatch

dn: cn=seeAlso,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
objectclass: top
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 4d7135c..fa436ac 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -405,7 +405,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
{
if (apply_mods)
replica_set_precise_purging(r, 0);
- }
+ }
else
{
*returncode = LDAP_UNWILLING_TO_PERFORM;
@@ -567,8 +567,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
{
if (apply_mods)
{
- if (apply_mods && config_attr_value[0])
- {
+ if (config_attr_value[0]) {
PRUint64 on_off = 0;

if (strcasecmp(config_attr_value, "on") == 0){
@@ -587,7 +586,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
break;
}
replica_set_precise_purging(r, on_off);
- } else if (apply_mods) {
+ } else {
replica_set_precise_purging(r, 0);
}
}
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index d0c4402..03d0c3e 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -323,6 +323,10 @@ repl5_tot_run(Private_Repl_Protocol *prp)
int init_retry = 0;
Replica *replica;
ReplicaId rid = 0; /* Used to create the replica keep alive subentry */
+ Slapi_Entry *suffix = NULL;
+ char **instances = NULL;
+ Slapi_Backend *be = NULL;
+ int is_entryrdn = 0;

PR_ASSERT(NULL != prp);

@@ -354,21 +358,21 @@ retry:
*/
if (rc != ACQUIRE_SUCCESS)
{
- int optype, ldaprc, wait_retry;
- conn_get_error(prp->conn, &optype, &ldaprc);
- if (rc == ACQUIRE_TRANSIENT_ERROR && INIT_RETRY_MAX > init_retry++) {
- wait_retry = init_retry * INIT_RETRY_INT;
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
- "acquire replica for total update, error: %d,"
+ int optype, ldaprc, wait_retry;
+ conn_get_error(prp->conn, &optype, &ldaprc);
+ if (rc == ACQUIRE_TRANSIENT_ERROR && INIT_RETRY_MAX > init_retry++) {
+ wait_retry = init_retry * INIT_RETRY_INT;
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "acquire replica for total update, error: %d,"
" retrying in %d seconds.\n",
- ldaprc, wait_retry);
- DS_Sleep(PR_SecondsToInterval(wait_retry));
- goto retry;
- } else {
- agmt_set_last_init_status(prp->agmt, ldaprc,
- prp->last_acquire_response_code, 0, NULL);
- goto done;
- }
+ ldaprc, wait_retry);
+ DS_Sleep(PR_SecondsToInterval(wait_retry));
+ goto retry;
+ } else {
+ agmt_set_last_init_status(prp->agmt, ldaprc,
+ prp->last_acquire_response_code, 0, NULL);
+ goto done;
+ }
}
else if (prp->terminate)
{
@@ -405,48 +409,121 @@ retry:
and that the order implies that perent entry is always ahead of the
child entry in the list. Otherwise, the consumer would not be
properly updated because bulk import at the moment skips orphand entries. */
- /* XXXggood above assumption may not be valid if orphaned entry moved???? */
+ /* XXXggood above assumption may not be valid if orphaned entry moved???? */

agmt_set_last_init_status(prp->agmt, 0, 0, 0, "Total update in progress");

slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Beginning total update of replica "
- "\"%s\".\n", agmt_get_long_name(prp->agmt));
+ "\"%s\".\n", agmt_get_long_name(prp->agmt));

/* RMREPL - need to send schema here */

pb = slapi_pblock_new ();

- /* we need to provide managedsait control so that referral entries can
- be replicated */
- ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
- ctrls[0] = create_managedsait_control ();
- ctrls[1] = create_backend_control(area_sdn);
+ replica = (Replica*) object_get_data(prp->replica_object);
+ /*
+ * Get the info about the entryrdn vs. entrydn from the backend.
+ * If NOT is_entryrdn, its ancestor entries are always found prior to an entry.
+ */
+ rc = slapi_lookup_instance_name_by_suffix((char *)slapi_sdn_get_dn(area_sdn), NULL, &instances, 1);
+ if (rc || !instances) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "get the instance name for the suffix \"%s\".\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }
+ be = slapi_be_select_by_instance_name(instances[0]);
+ if (!be) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "get the instance for the suffix \"%s\".\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }
+ rc = slapi_back_get_info(be, BACK_INFO_IS_ENTRYRDN, (void **)&is_entryrdn);
+ if (is_entryrdn) {
+ /*
+ * Supporting entries out of order -- parent could have a larger id than its children.
+ * Entires are retireved sorted by parentid without the allid threshold.
+ */
+ /* Get suffix */
+ rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
+ if (rc) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "get the suffix entry \"%s\".\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }

- /* Time to make sure it exists a keep alive subentry for that replica */
- replica = (Replica*) object_get_data(prp->replica_object);
- if (replica)
- {
- rid = replica_get_rid(replica);
- }
- replica_subentry_check(area_sdn, rid);
-
- slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn),
- LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
- repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
-
- cb_data.prp = prp;
- cb_data.rc = 0;
- cb_data.num_entries = 0UL;
- cb_data.sleep_on_busy = 0UL;
- cb_data.last_busy = current_time ();
- cb_data.flowcontrol_detection = 0;
- cb_data.lock = PR_NewLock();
-
- /* This allows during perform_operation to check the callback data
- * especially to do flow contol on delta send msgid / recv msgid
- */
- conn_set_tot_update_cb(prp->conn, (void *) &cb_data);
+ cb_data.prp = prp;
+ cb_data.rc = 0;
+ cb_data.num_entries = 1UL;
+ cb_data.sleep_on_busy = 0UL;
+ cb_data.last_busy = current_time ();
+ cb_data.flowcontrol_detection = 0;
+ cb_data.lock = PR_NewLock();
+
+ /* This allows during perform_operation to check the callback data
+ * especially to do flow contol on delta send msgid / recv msgid
+ */
+ conn_set_tot_update_cb(prp->conn, (void *) &cb_data);
+
+ /* Send suffix first. */
+ rc = send_entry(suffix, (void *)&cb_data);
+ if (rc) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "send the suffix entry \"%s\" to the consumer.\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }
+
+ /* we need to provide managedsait control so that referral entries can
+ be replicated */
+ ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
+ ctrls[0] = create_managedsait_control ();
+ ctrls[1] = create_backend_control(area_sdn);
+
+ /* Time to make sure it exists a keep alive subentry for that replica */
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ replica_subentry_check(area_sdn, rid);

+ /* Send the subtree of the suffix in the order of parentid index plus ldapsubentry and nstombstone. */
+ slapi_search_internal_set_pb(pb, slapi_sdn_get_dn (area_sdn),
+ LDAP_SCOPE_SUBTREE, "(|(parentid>=1)(objectclass=ldapsubentry)(objectclass=nstombstone))", NULL, 0, ctrls, NULL,
+ repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
+ cb_data.num_entries = 0UL;
+ } else {
+ /* Original total update */
+ /* we need to provide managedsait control so that referral entries can
+ be replicated */
+ ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
+ ctrls[0] = create_managedsait_control ();
+ ctrls[1] = create_backend_control(area_sdn);
+
+ /* Time to make sure it exists a keep alive subentry for that replica */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ replica_subentry_check(area_sdn, rid);
+
+ slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn),
+ LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
+ repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
+
+ cb_data.prp = prp;
+ cb_data.rc = 0;
+ cb_data.num_entries = 0UL;
+ cb_data.sleep_on_busy = 0UL;
+ cb_data.last_busy = current_time ();
+ cb_data.flowcontrol_detection = 0;
+ cb_data.lock = PR_NewLock();
+
+ /* This allows during perform_operation to check the callback data
+ * especially to do flow contol on delta send msgid / recv msgid
+ */
+ conn_set_tot_update_cb(prp->conn, (void *) &cb_data);
+ }
+
/* Before we get started on sending entries to the replica, we need to
* setup things for async propagation:
* 1. Create a thread that will read the LDAP results from the connection.
@@ -470,7 +547,7 @@ retry:
slapi_search_internal_callback_pb (pb, &cb_data /* callback data */,
get_result /* result callback */,
send_entry /* entry callback */,
- NULL /* referral callback*/);
+ NULL /* referral callback*/);

/*
* After completing the sending operation (or optionally failing), we need to clean up
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 9499292..2d77a8a 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -132,7 +132,7 @@ typedef unsigned short u_int16_t;
#define BDB_BACKEND "libback-ldbm" /* This backend plugin */
#define BDB_NEWIDL "newidl" /* new idl format */
#define BDB_RDNFORMAT "rdn-format" /* Subtree rename enabled */
-#define BDB_RDNFORMAT_VERSION "2" /* rdn-format version (by default, 0) */
+#define BDB_RDNFORMAT_VERSION "3" /* rdn-format version (by default, 0) */
#define BDB_DNFORMAT "dn-4514" /* DN format RFC 4514 compliant */
#define BDB_DNFORMAT_VERSION "1" /* DN format version */

@@ -808,11 +808,11 @@ typedef struct _back_search_result_set
/* #define LDBM_ENTRYRDN_OID "2.16.840.1.113730.3.1.2097" */

#define LDBM_ANCESTORID_STR "ancestorid"
-#define LDBM_ENTRYDN_STR "entrydn"
+#define LDBM_ENTRYDN_STR SLAPI_ATTR_ENTRYDN
#define LDBM_ENTRYRDN_STR "entryrdn"
#define LDBM_NUMSUBORDINATES_STR "numsubordinates"
#define LDBM_TOMBSTONE_NUMSUBORDINATES_STR "tombstonenumsubordinates"
-#define LDBM_PARENTID_STR "parentid"
+#define LDBM_PARENTID_STR SLAPI_ATTR_PARENTID

/* Name of psuedo attribute used to track default indexes */
#define LDBM_PSEUDO_ATTR_DEFAULT ".default"
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 33506f4..9e74d9b 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -7485,6 +7485,11 @@ ldbm_back_get_info(Slapi_Backend *be, int cmd, void **info)
}
break;
}
+ case BACK_INFO_IS_ENTRYRDN:
+ {
+ *(int *)info = entryrdn_get_switch();
+ break;
+ }
default:
break;
}
diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c
index 9c14de4..9a7e7be 100644
--- a/ldap/servers/slapd/back-ldbm/filterindex.c
+++ b/ldap/servers/slapd/back-ldbm/filterindex.c
@@ -552,6 +552,7 @@ range_candidates(
struct berval *low = NULL, *high = NULL;
struct berval **lows = NULL, **highs = NULL;
back_txn txn = {NULL};
+ int operator = 0;

LDAPDebug(LDAP_DEBUG_TRACE, "=> range_candidates attr=%s\n", type, 0, 0);

@@ -578,18 +579,21 @@ range_candidates(
}
high = attr_value_lowest(highs, slapi_berval_cmp);
}
-
+ if (entryrdn_get_switch() && !strcasecmp(type, LDBM_PARENTID_STR)) {
+ /* parentid is treated specially that is needed for the bulk import. (See #48755) */
+ operator = SLAPI_OP_RANGE_NO_IDL_SORT|SLAPI_OP_RANGE_NO_ALLIDS;
+ }
if (low == NULL) {
- idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY,
- SLAPI_OP_LESS_OR_EQUAL,
+ operator |= SLAPI_OP_LESS_OR_EQUAL;
+ idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator,
high, NULL, 0, &txn, err, allidslimit);
} else if (high == NULL) {
- idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY,
- SLAPI_OP_GREATER_OR_EQUAL,
+ operator |= SLAPI_OP_GREATER_OR_EQUAL;
+ idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator,
low, NULL, 0, &txn, err, allidslimit);
} else {
- idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY,
- SLAPI_OP_LESS_OR_EQUAL,
+ operator |= SLAPI_OP_LESS_OR_EQUAL;
+ idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator,
low, high, 1, &txn, err, allidslimit);
}

diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c
index 25b3bfa..6ca6c96 100644
--- a/ldap/servers/slapd/back-ldbm/idl_new.c
+++ b/ldap/servers/slapd/back-ldbm/idl_new.c
@@ -350,17 +350,31 @@ error:
return idl;
}

+typedef struct _range_id_pair {
+ ID key;
+ ID id;
+} idl_range_id_pair;
/*
* Perform the range search in the idl layer instead of the index layer
* to improve the performance.
*/
+/*
+ * NOTE:
+ * In the total update (bulk import), an entry requires its ancestors already added.
+ * To guarantee it, the range search with parentid is used with setting the flag
+ * SLAPI_OP_RANGE_NO_IDL_SORT in operator.
+ *
+ * If the flag is set,
+ * 1. the IDList is not sorted by the ID.
+ * 2. holding to add an ID to the IDList unless the key is found in the IDList.
+ */
IDList *
idl_new_range_fetch(
- backend *be,
- DB* db,
- DBT *lowerkey,
+ backend *be,
+ DB* db,
+ DBT *lowerkey,
DBT *upperkey,
- DB_TXN *txn,
+ DB_TXN *txn,
struct attrinfo *ai,
int *flag_err,
int allidslimit,
@@ -380,7 +394,7 @@ idl_new_range_fetch(
size_t count = 0;
#ifdef DB_USE_BULK_FETCH
/* beware that a large buffer on the stack might cause a stack overflow on some platforms */
- char buffer[BULK_FETCH_BUFFER_SIZE];
+ char buffer[BULK_FETCH_BUFFER_SIZE];
void *ptr;
DBT dataret;
