[arch-commits] Commit in samba/repos (14 files)
Bartłomiej Piotrowski
bpiotrowski at archlinux.org
Thu Apr 26 11:11:41 UTC 2018
Date: Thursday, April 26, 2018 @ 11:11:40
Author: bpiotrowski
Revision: 323031
archrelease: copy trunk to testing-x86_64
Added:
samba/repos/testing-x86_64/
samba/repos/testing-x86_64/PKGBUILD
(from rev 323030, samba/trunk/PKGBUILD)
samba/repos/testing-x86_64/bug13335.patch
(from rev 323030, samba/trunk/bug13335.patch)
samba/repos/testing-x86_64/nmbd.service
(from rev 323030, samba/trunk/nmbd.service)
samba/repos/testing-x86_64/samba.conf
(from rev 323030, samba/trunk/samba.conf)
samba/repos/testing-x86_64/samba.conf.d
(from rev 323030, samba/trunk/samba.conf.d)
samba/repos/testing-x86_64/samba.install
(from rev 323030, samba/trunk/samba.install)
samba/repos/testing-x86_64/samba.logrotate
(from rev 323030, samba/trunk/samba.logrotate)
samba/repos/testing-x86_64/samba.pam
(from rev 323030, samba/trunk/samba.pam)
samba/repos/testing-x86_64/samba.service
(from rev 323030, samba/trunk/samba.service)
samba/repos/testing-x86_64/smbd.service
(from rev 323030, samba/trunk/smbd.service)
samba/repos/testing-x86_64/smbd.socket
(from rev 323030, samba/trunk/smbd.socket)
samba/repos/testing-x86_64/smbd@.service
(from rev 323030, samba/trunk/smbd@.service)
samba/repos/testing-x86_64/winbindd.service
(from rev 323030, samba/trunk/winbindd.service)
------------------+
PKGBUILD | 242 ++++++
bug13335.patch | 2039 +++++++++++++++++++++++++++++++++++++++++++++++++++++
nmbd.service | 12
samba.conf | 2
samba.conf.d | 18
samba.install | 10
samba.logrotate | 5
samba.pam | 3
samba.service | 14
smbd.service | 12
smbd.socket | 9
smbd@.service | 7
winbindd.service | 12
13 files changed, 2385 insertions(+)
Copied: samba/repos/testing-x86_64/PKGBUILD (from rev 323030, samba/trunk/PKGBUILD)
===================================================================
--- testing-x86_64/PKGBUILD (rev 0)
+++ testing-x86_64/PKGBUILD 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,242 @@
+
+# Maintainer: Tobias Powalowski <tpowa at archlinux.org>
+# Contributor: judd <jvinet at zeroflux.org>
+# Contributor: Michael Hansen <zrax0111 gmail com>
+# Contributor: Marco A Rojas <marquicus at gmail.com>
+# Contributor: Netanel Shine <netanel at archlinux.org.il >
+# Contributor: ngoonee <ngoonee.talk at gmail.com>
+# Contributor: Adam Russell <adamlr6+arch at gmail.com>
+# Contributor: Dhananjay Sathe <dhananjaysathe at gmail.com>
+
+pkgbase=samba
+pkgname=('libwbclient' 'smbclient' 'samba')
+pkgver=4.8.1
+pkgrel=1
+arch=(x86_64)
+url="http://www.samba.org"
+license=('GPL3')
+makedepends=('python2' 'docbook-xsl' 'pkg-config' 'libbsd' 'db' 'popt' 'libcups'
+ 'readline' 'tevent' 'acl' 'libldap' 'libcap' 'ldb>=1.1.15' 'krb5' 'pam'
+ 'systemd' 'gamin' 'gnutls>=2.4.1' 'talloc' 'tdb' 'dbus' 'libaio'
+ 'perl-parse-yapp' 'libnsl' 'libtirpc' 'rpcsvc-proto')
+source=(http://us1.samba.org/samba/ftp/stable/${pkgbase}-${pkgver}.tar.gz
+ http://us1.samba.org/samba/ftp/stable/${pkgbase}-${pkgver}.tar.asc
+ samba.logrotate
+ samba.pam
+ samba.conf
+ bug13335.patch)
+validpgpkeys=('52FBC0B86D954B0843324CDC6F33915B6568B7EA') #Samba Distribution Verification Key <samba-bugs at samba.org>
+### UNINSTALL dmapi package before building!!!
+
+prepare() {
+ cd samba-${pkgver}
+ patch -p1 -i ../bug13335.patch
+}
+
+build() {
+ # Use samba-pkg as a staging directory for the split packages
+ # (This is so RPATHS and symlinks are generated correctly via
+ # make install, but the otherwise unsplit pieces can be split)
+ _pkgsrc=${srcdir}/samba-pkg
+ rm -rf ${_pkgsrc}
+ cd ${srcdir}/samba-${pkgver}
+ # change to use python2
+ SAVEIFS=${IFS}
+ IFS=$(echo -en "\n\b")
+ PYTHON_CALLERS="$(find ${srcdir}/samba-${pkgver} -name '*.py')
+$(find ${srcdir}/samba-${pkgver} -name 'wscript*')
+$(find ${srcdir}/samba-${pkgver} -name 'configure.ac')
+$(find ${srcdir}/samba-${pkgver} -name 'upgrade_from_s3')
+$(find ${srcdir}/samba-${pkgver}/buildtools -type f)
+$(find ${srcdir}/samba-${pkgver}/source4/scripting -type f)"
+ sed -i -e "s|/usr/bin/env python$|/usr/bin/env python2|" \
+ -e "s|python-config|python2-config|" \
+ -e "s|bin/python|bin/python2|" \
+ ${PYTHON_CALLERS}
+ IFS=${SAVEIFS}
+
+ export PYTHON=/usr/bin/python2
+_samba4_idmap_modules=idmap_ad,idmap_rid,idmap_adex,idmap_hash,idmap_tdb2
+_samba4_pdb_modules=pdb_tdbsam,pdb_ldap,pdb_ads,pdb_smbpasswd,pdb_wbc_sam,pdb_samba4
+_samba4_auth_modules=auth_unix,auth_wbc,auth_server,auth_netlogond,auth_script,auth_samba4
+ cd ${srcdir}/samba-${pkgver}
+ ./configure --enable-fhs \
+ --prefix=/usr \
+ --sbindir=/usr/bin \
+ --libdir=/usr/lib \
+ --libexecdir=/usr/lib/samba \
+ --localstatedir=/var \
+ --with-configdir=/etc/samba \
+ --with-lockdir=/var/cache/samba \
+ --with-sockets-dir=/var/run/samba \
+ --with-piddir=/var/run \
+ --with-ads \
+ --with-ldap \
+ --with-winbind \
+ --with-acl-support \
+ --with-systemd \
+ --systemd-install-services \
+ --enable-gnutls \
+ --with-pam \
+ --with-pammodulesdir=/usr/lib/security \
+ --bundled-libraries=!tdb,!talloc,!pytalloc-util,!tevent,!popt,!ldb,!pyldb-util \
+ --with-shared-modules=${_samba4_idmap_modules},${_samba4_pdb_modules},${_samba4_auth_modules} \
+ --disable-rpath-install
+
+ # Add this to the options once it's working...
+ #--with-system-mitkrb5 /opt/heimdal
+ make
+ make DESTDIR="${_pkgsrc}/" install
+
+ # This gets skipped somehow
+ if [ ! -e ${_pkgsrc}/usr/bin/smbtar ]; then
+ install -m755 ${srcdir}/samba-${pkgver}/source3/script/smbtar ${_pkgsrc}/usr/bin/
+ fi
+}
+
+package_libwbclient() {
+pkgdesc="Samba winbind client library"
+depends=('glibc' 'libbsd')
+ # Use samba-pkg as a staging directory for the split packages
+ # (This is so RPATHS and symlinks are generated correctly via
+ # make install, but the otherwise unsplit pieces can be split)
+ _pkgsrc=${srcdir}/samba-pkg
+ install -d -m755 ${pkgdir}/usr/lib
+ mv ${_pkgsrc}/usr/lib/libwbclient*.so* ${pkgdir}/usr/lib/
+
+ install -d -m755 ${pkgdir}/usr/lib/samba
+ mv ${_pkgsrc}/usr/lib/samba/libwinbind-client*.so* ${pkgdir}/usr/lib/samba/
+ mv ${_pkgsrc}/usr/lib/samba/libreplace-samba4.so* ${pkgdir}/usr/lib/samba/
+
+ install -d -m755 ${pkgdir}/usr/lib/pkgconfig
+ mv ${_pkgsrc}/usr/lib/pkgconfig/wbclient.pc ${pkgdir}/usr/lib/pkgconfig/
+
+ install -d -m755 ${pkgdir}/usr/include/samba-4.0
+ mv ${_pkgsrc}/usr/include/samba-4.0/wbclient.h ${pkgdir}/usr/include/samba-4.0/
+}
+
+package_smbclient() {
+pkgdesc="Tools to access a server's filespace and printers via SMB"
+depends=('popt' 'cifs-utils' 'tdb' "libwbclient>=$pkgver" 'ldb'
+ 'tevent' 'libgcrypt' 'python2' 'talloc' 'readline' 'gnutls'
+ 'libbsd' 'libldap' 'libcups' 'gamin' 'libarchive' 'libnsl')
+
+ _smbclient_bins=('smbclient' 'rpcclient' 'smbspool'
+ 'smbtree' 'smbcacls' 'smbcquotas' 'smbget' 'net'
+ 'nmblookup' 'smbtar')
+ # Use samba-pkg as a staging directory for the split packages
+ # (This is so RPATHS and symlinks are generated correctly via
+ # make install, but the otherwise unsplit pieces can be split)
+ _pkgsrc=${srcdir}/samba-pkg
+ install -d -m755 ${pkgdir}/usr/bin
+ for bin in ${_smbclient_bins[@]}; do
+ mv ${_pkgsrc}/usr/bin/${bin} ${pkgdir}/usr/bin/
+ done
+
+ # smbclient binaries link to the majority of the samba
+ # libs, so this is a shortcut instead of resolving the
+ # whole dependency tree by hand
+ install -d -m755 ${pkgdir}/usr/lib
+ for lib in ${_pkgsrc}/usr/lib/lib*.so*; do
+ mv ${lib} ${pkgdir}/usr/lib/
+ done
+
+ install -d -m755 ${pkgdir}/usr/lib/samba
+ for lib in ${_pkgsrc}/usr/lib/samba/lib*.so*; do
+ mv ${lib} ${pkgdir}/usr/lib/samba/
+ done
+
+ install -d -m755 ${pkgdir}/usr/lib/pkgconfig
+ mv ${_pkgsrc}/usr/lib/pkgconfig/smbclient.pc ${pkgdir}/usr/lib/pkgconfig/
+ mv ${_pkgsrc}/usr/lib/pkgconfig/netapi.pc ${pkgdir}/usr/lib/pkgconfig/
+
+ install -d -m755 ${pkgdir}/usr/share/man/man1
+ install -d -m755 ${pkgdir}/usr/share/man/man7
+ install -d -m755 ${pkgdir}/usr/share/man/man8
+ for bin in ${_smbclient_bins[@]}; do
+ if [ -e ${_pkgsrc}/usr/share/man/man1/${bin}.1 ]; then
+ mv ${_pkgsrc}/usr/share/man/man1/${bin}.1 ${pkgdir}/usr/share/man/man1/
+ fi
+ if [ -e ${_pkgsrc}/usr/share/man/man8/${bin}.8 ]; then
+ mv ${_pkgsrc}/usr/share/man/man8/${bin}.8 ${pkgdir}/usr/share/man/man8/
+ fi
+ done
+ mv ${_pkgsrc}/usr/share/man/man7/libsmbclient.7 ${pkgdir}/usr/share/man/man7/
+
+ install -d -m755 ${pkgdir}/usr/include/samba-4.0
+ mv ${_pkgsrc}/usr/include/samba-4.0/libsmbclient.h ${pkgdir}/usr/include/samba-4.0/
+ mv ${_pkgsrc}/usr/include/samba-4.0/netapi.h ${pkgdir}/usr/include/samba-4.0/
+
+ mkdir -p ${pkgdir}/usr/lib/cups/backend
+ ln -sf /usr/bin/smbspool ${pkgdir}/usr/lib/cups/backend/smb
+}
+
+package_samba() {
+pkgdesc="SMB Fileserver and AD Domain server"
+depends=('db>=4.7' 'popt' 'libcups' 'libcap>=2.16' 'gamin' 'gnutls>=2.4.1'
+ 'talloc' 'ldb' 'libbsd' 'python2' 'iniparser' 'tdb' 'libaio' 'perl-parse-yapp' "smbclient>=$pkgver" "gpgme")
+backup=(etc/logrotate.d/samba
+ etc/pam.d/samba
+ etc/samba/smb.conf
+ etc/xinetd.d/swat
+ etc/conf.d/samba)
+install=samba.install
+ # Use samba-pkg as a staging directory for the split packages
+ # (This is so RPATHS and symlinks are generated correctly via
+ # make install, but the otherwise unsplit pieces can be split)
+ _pkgsrc=${srcdir}/samba-pkg
+ # Everything that libwbclient and smbclient didn't install goes
+ # into the samba package...
+ mv ${_pkgsrc}/* ${pkgdir}/
+ rmdir ${_pkgsrc}
+
+ _pyver=`python2 -c 'import sys; print(sys.version[:3])'`
+
+ find ${pkgdir}/usr/lib/python${_pyver}/site-packages/ -name '*.py' | \
+ xargs sed -i "s|#!/usr/bin/env python$|#!/usr/bin/env python2|"
+ find ${pkgdir}/usr/bin ${pkgdir}/usr/bin -type f -executable | \
+ xargs sed -i "s|#!/usr/bin/env python$|#!/usr/bin/env python2|"
+
+ # Make admin scripts look in the right place for the samba python module
+ for script in bin/samba_dnsupdate bin/samba_kcc bin/samba_spnupdate \
+ bin/samba_upgradedns bin/samba-tool
+ do
+ sed -i "/^sys\.path\.insert/ a\
+sys.path.insert(0, '/usr/lib/python${_pyver}/site-packages')" \
+ ${pkgdir}/usr/${script}
+ done
+
+ # packaging/wscript_build to use /etc/conf.d
+ sed -i -e '/^EnvironmentFile/ s/sysconfig/conf.d/' "${pkgdir}"/usr/lib/systemd/system/*.service
+ install -d -m755 "${pkgdir}"/etc/conf.d
+ install -m644 "${srcdir}"/samba-${pkgver}/packaging/systemd/samba.sysconfig "${pkgdir}"/etc/conf.d/samba
+
+ # create ephemeral dirs via tmpfiles rather than shipping them in package
+ install -D -m644 ${srcdir}/samba.conf ${pkgdir}/usr/lib/tmpfiles.d/samba.conf
+ # create config dir
+ install -d -m755 ${pkgdir}/etc/samba
+
+ mkdir -p ${pkgdir}/etc/samba/private
+ chmod 700 ${pkgdir}/etc/samba/private
+
+ install -D -m644 ${srcdir}/samba.logrotate ${pkgdir}/etc/logrotate.d/samba
+ install -D -m644 ${srcdir}/samba.pam ${pkgdir}/etc/pam.d/samba
+
+ # winbind krb5 locator
+ mkdir -p ${pkgdir}/usr/lib/krb5/plugins/libkrb5
+ mv ${pkgdir}/usr/lib/*.so ${pkgdir}/usr/lib/krb5/plugins/libkrb5/
+
+ # spool directory
+ install -d -m1777 ${pkgdir}/var/spool/samba
+
+ rm -rf ${pkgdir}/var/run
+
+ # copy ldap example
+ install -D -m644 ${srcdir}/samba-${pkgver}/examples/LDAP/samba.schema ${pkgdir}/usr/share/doc/samba/examples/LDAP/samba.schema
+}
+md5sums=('3cdb976a892bc036bfb61eeb97f68450'
+ 'SKIP'
+ '995621522c6ec9b68c1b858ceed627ed'
+ '96f82c38f3f540b53f3e5144900acf17'
+ '49abd7b719e3713a3f75a8a50958e381'
+ '86db2a3247a79d195341759da4c27454')
Copied: samba/repos/testing-x86_64/bug13335.patch (from rev 323030, samba/trunk/bug13335.patch)
===================================================================
--- testing-x86_64/bug13335.patch (rev 0)
+++ testing-x86_64/bug13335.patch 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,2039 @@
+From b26d21cebda58547818e24927131a3c62955bd9c Mon Sep 17 00:00:00 2001
+From: Gary Lockyer <gary at catalyst.net.nz>
+Date: Wed, 21 Feb 2018 15:12:40 +1300
+Subject: [PATCH 1/5] ldb_tdb: Add tests for truncated index keys
+
+Tests for the index truncation code as well as the GUID index
+format in general.
+
+Covers truncation of both the DN and equality search keys.
+
+Signed-off-by: Gary Lockyer <gary at catalyst.net.nz>
+Reviewed-by: Douglas Bagnall <douglas.bagnall at catalyst.net.nz>
+Reviewed-by: Andrew Bartlett <abartlet at samba.org>
+
+Autobuild-User(master): Andrew Bartlett <abartlet at samba.org>
+Autobuild-Date(master): Sat Mar 3 09:58:40 CET 2018 on sn-devel-144
+
+BUG: https://bugzilla.samba.org/show_bug.cgi?id=13335
+
+(cherry picked into 4.8 and cut down to operate without truncated
+index values from master commit 4c0c888b571d4c21ab267024178353925a8c087c
+by Andrew Bartlett)
+---
+ lib/ldb/tests/python/index.py | 1007 +++++++++++++++++++++++++++++++++++++++++
+ lib/ldb/wscript | 2 +-
+ 2 files changed, 1008 insertions(+), 1 deletion(-)
+ create mode 100755 lib/ldb/tests/python/index.py
+
+diff --git a/lib/ldb/tests/python/index.py b/lib/ldb/tests/python/index.py
+new file mode 100755
+index 00000000000..cd3735b5625
+--- /dev/null
++++ b/lib/ldb/tests/python/index.py
+@@ -0,0 +1,1007 @@
++#!/usr/bin/env python
++#
++# Tests for truncated index keys
++#
++# Copyright (C) Andrew Bartlett <abartlet at samba.org> 2018
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program. If not, see <http://www.gnu.org/licenses/>.
++#
++"""Tests for index keys
++
++This is a modified version of the test from master, as databases such
++as lmdb have a maximum key length; here we instead just check that the
++GUID index code still operates correctly.
++
++Many of the test names are therefore incorrect, but are retained
++to keep the code easy to backport into if more tests are added in
++master.
++
++"""
++
++import os
++from unittest import TestCase
++import sys
++import ldb
++import shutil
++
++PY3 = sys.version_info > (3, 0)
++
++
++def tempdir():
++ import tempfile
++ try:
++ dir_prefix = os.path.join(os.environ["SELFTEST_PREFIX"], "tmp")
++ except KeyError:
++ dir_prefix = None
++ return tempfile.mkdtemp(dir=dir_prefix)
++
++
++def contains(result, dn):
++ if result is None:
++ return False
++
++ for r in result:
++ if str(r["dn"]) == dn:
++ return True
++ return False
++
++
++class MaxIndexKeyLengthTests(TestCase):
++ def checkGuids(self, key, guids):
++ #
++ # This check relies on the current implementation where the indexes
++ # are in the same database as the data.
++ #
++ # It checks that the index record exists, unless guids is None then
++ # the record must not exist. And the it contains the expected guid
++ # entries.
++ #
++ # The caller needs to provide the GUID's in the expected order
++ #
++ res = self.l.search(
++ base=key,
++ scope=ldb.SCOPE_BASE)
++ if guids is None:
++ self.assertEqual(len(res), 0)
++ return
++ self.assertEqual(len(res), 1)
++
++ # The GUID index format has only one value
++ index = res[0]["@IDX"][0]
++ self.assertEqual(len(guids), len(index))
++ self.assertEqual(guids, index)
++
++ def tearDown(self):
++ shutil.rmtree(self.testdir)
++ super(MaxIndexKeyLengthTests, self).tearDown()
++
++ # Ensure the LDB is closed now, so we close the FD
++ del(self.l)
++
++ def setUp(self):
++ super(MaxIndexKeyLengthTests, self).setUp()
++ self.testdir = tempdir()
++ self.filename = os.path.join(self.testdir, "key_len_test.ldb")
++ # Note that the maximum key length is set to 50
++ self.l = ldb.Ldb(self.filename,
++ options=[
++ "modules:rdn_name",
++ "max_key_len_for_self_test:50"])
++ self.l.add({"dn": "@ATTRIBUTES",
++ "uniqueThing": "UNIQUE_INDEX"})
++ self.l.add({"dn": "@INDEXLIST",
++ "@IDXATTR": [b"uniqueThing", b"notUnique"],
++ "@IDXONE": [b"1"],
++ "@IDXGUID": [b"objectUUID"],
++ "@IDX_DN_GUID": [b"GUID"]})
++
++ # Test that DN's longer the maximum key length can be added
++ # and that duplicate DN's are rejected correctly
++ def test_add_long_dn_add(self):
++ #
++ # For all entries the DN index key gets truncated to
++ # @INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA
++ #
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "objectUUID": b"0123456789abcdef"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM",
++ "objectUUID": b"0123456789abcde0"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ "objectUUID": b"0123456789abcde1"})
++
++ # Key is equal to max length does not get inserted into the truncated
++ # key namespace
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde5"})
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ # This key should not get truncated, as it's one character less than
++ # max, and will not be in the truncate name space
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde7"})
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde7")
++
++ try:
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "objectUUID": b"0123456789abcde2"})
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_ENTRY_ALREADY_EXISTS)
++
++ try:
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM",
++ "objectUUID": b"0123456789abcde3"})
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_ENTRY_ALREADY_EXISTS)
++
++ try:
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ "objectUUID": b"0123456789abcde4"})
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_ENTRY_ALREADY_EXISTS)
++
++ try:
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde6"})
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_ENTRY_ALREADY_EXISTS)
++
++ try:
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde8"})
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_ENTRY_ALREADY_EXISTS)
++
++ def test_rename_truncated_dn_keys(self):
++ # For all entries the DN index key gets truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "objectUUID": b"0123456789abcdef"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM",
++ "objectUUID": b"0123456789abcde0"})
++
++ # Non conflicting rename, should succeed
++ self.l.rename("OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++
++ # Conflicting rename should fail
++ try:
++ self.l.rename("OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM",
++ "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_ENTRY_ALREADY_EXISTS)
++
++ def test_delete_truncated_dn_keys(self):
++ #
++ # For all entries the DN index key gets truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA
++ #
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "objectUUID": b"0123456789abcdef"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ "objectUUID": b"0123456789abcde1"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde5"})
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ # Try to delete a non existent DN with a truncated key
++ try:
++ self.l.delete("OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM")
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
++ # Ensure that non of the other truncated DN's got deleted
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG")
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ self.assertEqual(len(res), 1)
++
++ # Ensure that the non truncated DN did not get deleted
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA")
++ self.assertEqual(len(res), 1)
++
++ # Check the indexes are correct
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ # delete an existing entry
++ self.l.delete("OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG")
++
++ # Ensure it got deleted
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG")
++ self.assertEqual(len(res), 0)
++
++ # Ensure that non of the other truncated DN's got deleted
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ self.assertEqual(len(res), 1)
++
++ # Ensure the non truncated entry did not get deleted.
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA")
++ self.assertEqual(len(res), 1)
++
++ # Check the indexes are correct
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ # delete an existing entry
++ self.l.delete("OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++
++ # Ensure it got deleted
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ self.assertEqual(len(res), 0)
++
++ # Ensure that non of the non truncated DN's got deleted
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA")
++ self.assertEqual(len(res), 1)
++ # Check the indexes are correct
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ # delete an existing entry
++ self.l.delete("OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA")
++
++ # Ensure it got deleted
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBAxxx")
++ self.assertEqual(len(res), 0)
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ None)
++
++ def test_search_truncated_dn_keys(self):
++ #
++ # For all entries the DN index key gets truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA
++ #
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "objectUUID": b"0123456789abcdef"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ "objectUUID": b"0123456789abcde1"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde5"})
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG")
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA")
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM")
++ self.assertEqual(len(res), 0)
++
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ self.assertEqual(len(res), 0)
++
++ # Non existent, key one less than truncation limit
++ res = self.l.search(base="OU=A_LONG_DNXXXXXXXXXXXXXX,DC=SAMBA")
++ self.assertEqual(len(res), 0)
++
++ def test_search_dn_filter_truncated_dn_keys(self):
++ #
++ # For all entries the DN index key gets truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA
++ #
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "objectUUID": b"0123456789abcdef"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ "objectUUID": b"0123456789abcde1"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde5"})
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ res = self.l.search(
++ expression="dn=OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG")
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(
++ expression="dn=OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(
++ expression="dn=OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA")
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(
++ expression="dn=OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM")
++ self.assertEqual(len(res), 0)
++
++ res = self.l.search(
++ expression="dn=OU=A_LONG_DNXXXXXXXXXXXX,DC=SAMBA,DC=GOV")
++ self.assertEqual(len(res), 0)
++
++ # Non existent, key one less than truncation limit
++ res = self.l.search(
++ expression="dn=OU=A_LONG_DNXXXXXXXXXXXXXX,DC=SAMBA")
++ self.assertEqual(len(res), 0)
++
++ def test_search_one_level_truncated_dn_keys(self):
++ #
++ # Except for the base DN's
++ # all entries the DN index key gets truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=??,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA
++ # The base DN-s truncate to
++ # @INDEX:@IDXDN:OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR
++ #
++ self.l.add({"dn": "OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcdef"})
++ self.l.add({"dn": "OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcd1f"})
++
++ self.l.add({"dn": "OU=01,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcde1"})
++ self.l.add({"dn": "OU=01,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcd11"})
++
++ self.l.add({"dn": "OU=02,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcde2"})
++ self.l.add({"dn": "OU=02,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcdf2"})
++
++ self.l.add({"dn": "OU=03,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcde3"})
++ self.l.add({"dn": "OU=03,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcd13"})
++
++ # This key is not truncated as it's the max_key_len
++ self.l.add({"dn": "OU=01,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde7"})
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=01,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA",
++ b"0123456789abcde7")
++
++ res = self.l.search(base="OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1",
++ scope=ldb.SCOPE_ONELEVEL)
++ self.assertEqual(len(res), 3)
++ self.assertTrue(
++ contains(res, "OU=01,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1"))
++ self.assertTrue(
++ contains(res, "OU=03,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR1"))
++
++ res = self.l.search(base="OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2",
++ scope=ldb.SCOPE_ONELEVEL)
++ self.assertEqual(len(res), 3)
++ self.assertTrue(
++ contains(res, "OU=01,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2"))
++ self.assertTrue(
++ contains(res, "OU=03,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA,DC=OR2"))
++
++ res = self.l.search(base="OU=A_LONG_DN_ONE_LVLX,DC=SAMBA",
++ scope=ldb.SCOPE_ONELEVEL)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=01,OU=A_LONG_DN_ONE_LVLX,DC=SAMBA"))
++
++ def test_search_sub_tree_truncated_dn_keys(self):
++ #
++ # Except for the base DN's
++ # all entries the DN index key gets truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=??,OU=A_LONG_DN_SUB_TREE,DC=SAMBA
++ # The base DN-s truncate to
++ # @INDEX:@IDXDN:OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR
++ #
++ self.l.add({"dn": "OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcdef"})
++ self.l.add({"dn": "OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcde4"})
++ self.l.add({"dn": "OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR3",
++ "objectUUID": b"0123456789abcde8"})
++
++ self.l.add({"dn": "OU=01,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcde1"})
++ self.l.add({"dn": "OU=01,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcde5"})
++
++ self.l.add({"dn": "OU=02,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcde2"})
++ self.l.add({"dn": "OU=02,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcde6"})
++
++ self.l.add({"dn": "OU=03,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcde3"})
++
++ self.l.add({"dn": "OU=03,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcde7"})
++
++ self.l.add({"dn": "OU=04,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR4",
++ "objectUUID": b"0123456789abcde9"})
++
++ res = self.l.search(base="OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1",
++ scope=ldb.SCOPE_SUBTREE)
++ self.assertEqual(len(res), 4)
++ self.assertTrue(
++ contains(res, "OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1"))
++ self.assertTrue(
++ contains(res, "OU=01,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1"))
++ self.assertTrue(
++ contains(res, "OU=03,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR1"))
++
++ res = self.l.search(base="OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2",
++ scope=ldb.SCOPE_SUBTREE)
++ self.assertEqual(len(res), 4)
++ self.assertTrue(
++ contains(res, "OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2"))
++ self.assertTrue(
++ contains(res, "OU=01,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2"))
++ self.assertTrue(
++ contains(res, "OU=03,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR2"))
++
++ res = self.l.search(base="OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR3",
++ scope=ldb.SCOPE_SUBTREE)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR3"))
++
++ res = self.l.search(base="OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR4",
++ scope=ldb.SCOPE_SUBTREE)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=04,OU=A_LONG_DN_SUB_TREE,DC=SAMBA,DC=OR4"))
++
++ def test_search_base_truncated_dn_keys(self):
++ #
++ # For all entries the DN index key gets truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA
++ #
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ "objectUUID": b"0123456789abcdef"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ "objectUUID": b"0123456789abcde1"})
++
++ self.l.add({"dn": "OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ "objectUUID": b"0123456789abcde5"})
++ self.checkGuids(
++ "@INDEX:@IDXDN:OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ b"0123456789abcde5")
++
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_BASE)
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ scope=ldb.SCOPE_BASE)
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA",
++ scope=ldb.SCOPE_BASE)
++ self.assertEqual(len(res), 1)
++
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXXX,DC=SAMBA,DC=COM",
++ scope=ldb.SCOPE_BASE)
++ self.assertEqual(len(res), 0)
++
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXX,DC=SAMBA,DC=GOV",
++ scope=ldb.SCOPE_BASE)
++ self.assertEqual(len(res), 0)
++
++ # Non existent, key one less than truncation limit
++ res = self.l.search(
++ base="OU=A_LONG_DNXXXXXXXXXXXXXX,DC=SAMBA",
++ scope=ldb.SCOPE_BASE)
++ self.assertEqual(len(res), 0)
++
++ #
++ # Test non unique index searched with truncated keys
++ #
++ def test_index_truncated_keys(self):
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
++
++ eq_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
++ gt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
++ lt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
++ # Longer than max length, differing only in the chars that get truncated
++ gt_max_b = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
++
++ # Add two entries with the same value, key length = max so no
++ # truncation.
++ self.l.add({"dn": "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": eq_max,
++ "objectUUID": b"0123456789abcde0"})
++ self.checkGuids(
++ "@INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
++ b"0123456789abcde0")
++
++ self.l.add({"dn": "OU=02,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": eq_max,
++ "objectUUID": b"0123456789abcde1"})
++ self.checkGuids(
++ "@INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
++ b"0123456789abcde0" + b"0123456789abcde1")
++
++ #
++ # An entry outside the tree
++ #
++ self.l.add({"dn": "OU=10,OU=SEARCH_NON_UNIQUE01,DC=SAMBA,DC=ORG",
++ "notUnique": eq_max,
++ "objectUUID": b"0123456789abcd11"})
++ self.checkGuids(
++ "@INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
++ b"0123456789abcd11" + b"0123456789abcde0" + b"0123456789abcde1")
++
++ # Key longer than max so should get truncated to same key as
++ # the previous two entries
++ self.l.add({"dn": "OU=03,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": gt_max,
++ "objectUUID": b"0123456789abcde2"})
++
++ # Key longer than max so should get truncated to same key as
++ # the previous entries but differs in the chars after max length
++ self.l.add({"dn": "OU=23,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": gt_max_b,
++ "objectUUID": b"0123456789abcd22"})
++ #
++ # An entry outside the tree
++ #
++ self.l.add({"dn": "OU=11,OU=SEARCH_NON_UNIQUE01,DC=SAMBA,DC=ORG",
++ "notUnique": gt_max,
++ "objectUUID": b"0123456789abcd12"})
++
++ # Key shorter than max
++ #
++ self.l.add({"dn": "OU=04,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": lt_max,
++ "objectUUID": b"0123456789abcde3"})
++ self.checkGuids(
++ "@INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
++ b"0123456789abcde3")
++ #
++ # An entry outside the tree
++ #
++ self.l.add({"dn": "OU=12,OU=SEARCH_NON_UNIQUE01,DC=SAMBA,DC=ORG",
++ "notUnique": lt_max,
++ "objectUUID": b"0123456789abcd13"})
++ self.checkGuids(
++ "@INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
++ b"0123456789abcd13" + b"0123456789abcde3")
++
++ #
++ # search for target is max value not truncated
++ # should return ou's 01, 02
++ #
++ expression = "(notUnique=" + eq_max.decode('ascii') + ")"
++ res = self.l.search(base="OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 2)
++ self.assertTrue(
++ contains(res, "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ #
++ # search for target is max value not truncated
++ # search one level up the tree, scope is ONE_LEVEL
++ # So should get no matches
++ #
++ expression = "(notUnique=" + eq_max.decode('ascii') + ")"
++ res = self.l.search(base="DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 0)
++ #
++ # search for target is max value not truncated
++ # search one level up the tree, scope is SUBTREE
++ # So should get 3 matches
++ #
++ res = self.l.search(base="DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_SUBTREE,
++ expression=expression)
++ self.assertEqual(len(res), 3)
++ self.assertTrue(
++ contains(res, "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ self.assertTrue(
++ contains(res, "OU=10,OU=SEARCH_NON_UNIQUE01,DC=SAMBA,DC=ORG"))
++ #
++ # search for target is max value + 1 so truncated
++ # should return ou 23 as it's gt_max_b being searched for
++ #
++ expression = "(notUnique=" + gt_max_b.decode('ascii') + ")"
++ res = self.l.search(base="OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=23,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ #
++ # search for target is max value + 1 so truncated
++ # should return ou 03 as it's gt_max being searched for
++ #
++ expression = "(notUnique=" + gt_max.decode('ascii') + ")"
++ res = self.l.search(base="OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=03,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ #
++ # scope ONE_LEVEL, searching one level up the tree, should get no matches
++ #
++ res = self.l.search(base="DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 0)
++ #
++ # scope SUBTREE, searching one level up the tree, should get 2 matches
++ #
++ res = self.l.search(base="DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_SUBTREE,
++ expression=expression)
++ self.assertEqual(len(res), 2)
++ self.assertTrue(
++ contains(res, "OU=03,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ self.assertTrue(
++ contains(res, "OU=11,OU=SEARCH_NON_UNIQUE01,DC=SAMBA,DC=ORG"))
++
++ #
++ # search for target is max value - 1 so not truncated
++ # should return ou 04
++ #
++ expression = "(notUnique=" + lt_max.decode('ascii') + ")"
++ res = self.l.search(base="OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=04,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ #
++ # scope ONE_LEVEL, searching one level up the tree, should get no matches
++ #
++ res = self.l.search(base="DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 0)
++
++ #
++ # scope SUBTREE, searching one level up the tree, should get 2 matches
++ #
++ res = self.l.search(base="DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_SUBTREE,
++ expression=expression)
++ self.assertEqual(len(res), 2)
++ self.assertTrue(
++ contains(res, "OU=04,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ self.assertTrue(
++ contains(res, "OU=12,OU=SEARCH_NON_UNIQUE01,DC=SAMBA,DC=ORG"))
++
++ #
++ # Test adding to non unique index with identical multivalued index
++ # attributes
++ #
++ def test_index_multi_valued_identical_keys(self):
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
++ as_eq_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
++ bs_eq_max = b"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
++
++ try:
++ self.l.add({"dn": "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": [bs_eq_max, as_eq_max, as_eq_max],
++ "objectUUID": b"0123456789abcde0"})
++ self.fail("Exception not thrown")
++ except ldb.LdbError as e:
++ code = e.args[0]
++ self.assertEqual(ldb.ERR_ATTRIBUTE_OR_VALUE_EXISTS, code)
++
++ #
++ # Test non unique index with multivalued index attributes
++ # searched with non truncated keys
++ #
++ def test_search_index_multi_valued_truncated_keys(self):
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
++
++ aa_gt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
++ ab_gt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
++ bb_gt_max = b"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
++
++ self.l.add({"dn": "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": [aa_gt_max, ab_gt_max, bb_gt_max],
++ "objectUUID": b"0123456789abcde0"})
++
++ expression = "(notUnique=" + aa_gt_max.decode('ascii') + ")"
++ res = self.l.search(base="OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ expression = "(notUnique=" + ab_gt_max.decode('ascii') + ")"
++ res = self.l.search(base="OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ expression = "(notUnique=" + bb_gt_max.decode('ascii') + ")"
++ res = self.l.search(base="OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ scope=ldb.SCOPE_ONELEVEL,
++ expression=expression)
++ self.assertEqual(len(res), 1)
++ self.assertTrue(
++ contains(res, "OU=01,OU=SEARCH_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ #
++ # Test deletion of records with non unique index with multivalued index
++ # attributes
++ # replicate this to test modify with modify flags i.e. DELETE, REPLACE
++ #
++ def test_delete_index_multi_valued_truncated_keys(self):
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
++
++ aa_gt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
++ ab_gt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
++ bb_gt_max = b"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
++ cc_gt_max = b"cccccccccccccccccccccccccccccccccc"
++
++ self.l.add({"dn": "OU=01,OU=DELETE_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": [aa_gt_max, ab_gt_max, bb_gt_max],
++ "objectUUID": b"0123456789abcde0"})
++ self.l.add({"dn": "OU=02,OU=DELETE_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": [aa_gt_max, ab_gt_max, cc_gt_max],
++ "objectUUID": b"0123456789abcde1"})
++
++ res = self.l.search(
++ base="DC=SAMBA,DC=ORG",
++ expression="(notUnique=" + aa_gt_max.decode("ascii") + ")")
++ self.assertEqual(2, len(res))
++ self.assertTrue(
++ contains(res, "OU=01,OU=DELETE_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=DELETE_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ self.l.delete("OU=02,OU=DELETE_NON_UNIQUE,DC=SAMBA,DC=ORG")
++
++ self.l.delete("OU=01,OU=DELETE_NON_UNIQUE,DC=SAMBA,DC=ORG")
++
++ #
++ # Test modification of records with non unique index with multivalued index
++ # attributes
++ #
++ def test_modify_index_multi_valued_truncated_keys(self):
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:NOTUNIQUE:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
++
++ aa_gt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
++ ab_gt_max = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
++ bb_gt_max = b"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
++ cc_gt_max = b"cccccccccccccccccccccccccccccccccc"
++
++ self.l.add({"dn": "OU=01,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": [aa_gt_max, ab_gt_max, bb_gt_max],
++ "objectUUID": b"0123456789abcde0"})
++ self.l.add({"dn": "OU=02,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG",
++ "notUnique": [aa_gt_max, ab_gt_max, cc_gt_max],
++ "objectUUID": b"0123456789abcde1"})
++
++ res = self.l.search(
++ base="DC=SAMBA,DC=ORG",
++ expression="(notUnique=" + aa_gt_max.decode("ascii") + ")")
++ self.assertEquals(2, len(res))
++ self.assertTrue(
++ contains(res, "OU=01,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG"))
++
++ #
++ # Modify that does not change the indexed attribute
++ #
++ msg = ldb.Message()
++ msg.dn = ldb.Dn(self.l, "OU=01,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG")
++ msg["notUnique"] = ldb.MessageElement(
++ [aa_gt_max, ab_gt_max, bb_gt_max],
++ ldb.FLAG_MOD_REPLACE,
++ "notUnique")
++ self.l.modify(msg)
++ #
++ # As the modify is replacing the attribute with the same contents
++ # there should be no changes to the indexes.
++ #
++
++ #
++ # Modify that removes a value from the indexed attribute
++ #
++ msg = ldb.Message()
++ msg.dn = ldb.Dn(self.l, "OU=01,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG")
++ msg["notUnique"] = ldb.MessageElement(
++ [aa_gt_max, bb_gt_max],
++ ldb.FLAG_MOD_REPLACE,
++ "notUnique")
++ self.l.modify(msg)
++
++ #
++ # Modify that does a constrained delete the indexed attribute
++ #
++ msg = ldb.Message()
++ msg.dn = ldb.Dn(self.l, "OU=02,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG")
++ msg["notUnique"] = ldb.MessageElement(
++ [ab_gt_max],
++ ldb.FLAG_MOD_DELETE,
++ "notUnique")
++ self.l.modify(msg)
++
++ #
++ # Modify that does an unconstrained delete the indexed attribute
++ #
++ msg = ldb.Message()
++ msg.dn = ldb.Dn(self.l, "OU=02,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG")
++ msg["notUnique"] = ldb.MessageElement(
++ [],
++ ldb.FLAG_MOD_DELETE,
++ "notUnique")
++ self.l.modify(msg)
++
++ #
++ # Modify that adds a value to the indexed attribute
++ #
++ msg = ldb.Message()
++ msg.dn = ldb.Dn(self.l, "OU=02,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG")
++ msg["notUnique"] = ldb.MessageElement(
++ [cc_gt_max],
++ ldb.FLAG_MOD_ADD,
++ "notUnique")
++ self.l.modify(msg)
++
++ #
++ # Modify that adds multiple values to the indexed attribute
++ #
++ msg = ldb.Message()
++ msg.dn = ldb.Dn(self.l, "OU=02,OU=MODIFY_NON_UNIQUE,DC=SAMBA,DC=ORG")
++ msg["notUnique"] = ldb.MessageElement(
++ [aa_gt_max, ab_gt_max],
++ ldb.FLAG_MOD_ADD,
++ "notUnique")
++ self.l.modify(msg)
++
++ #
++ # Test Sub tree searches when checkBaseOnSearch is enabled and the
++ # DN indexes are truncated and collide.
++ #
++ def test_check_base_on_search_truncated_dn_keys(self):
++ #
++ # Except for the base DNs, all entries' DN index keys
++ # get truncated to
++ # 0 1 2 3 4 5
++ # 12345678901234567890123456789012345678901234567890
++ # @INDEX:@IDXDN:OU=??,OU=CHECK_BASE_DN_XXXX,DC=SAMBA
++ # The base DNs truncate to
++ # @INDEX:@IDXDN:OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR
++ #
++ checkbaseonsearch = {"dn": "@OPTIONS",
++ "checkBaseOnSearch": b"TRUE"}
++ self.l.add(checkbaseonsearch)
++
++ self.l.add({"dn": "OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcdef"})
++ self.l.add({"dn": "OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcdee"})
++
++ self.l.add({"dn": "OU=01,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcdec"})
++ self.l.add({"dn": "OU=01,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcdeb"})
++ self.l.add({"dn": "OU=01,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR3",
++ "objectUUID": b"0123456789abcded"})
++
++ self.l.add({"dn": "OU=02,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR1",
++ "objectUUID": b"0123456789abcde0"})
++ self.l.add({"dn": "OU=02,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR2",
++ "objectUUID": b"0123456789abcde1"})
++ self.l.add({"dn": "OU=02,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR3",
++ "objectUUID": b"0123456789abcde2"})
++
++ res = self.l.search(base="OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR1",
++ scope=ldb.SCOPE_SUBTREE)
++ self.assertEqual(len(res), 3)
++ self.assertTrue(
++ contains(res, "OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR1"))
++ self.assertTrue(
++ contains(res, "OU=01,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR1"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR1"))
++
++ res = self.l.search(base="OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR2",
++ scope=ldb.SCOPE_SUBTREE)
++ self.assertEqual(len(res), 3)
++ self.assertTrue(
++ contains(res, "OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR2"))
++ self.assertTrue(
++ contains(res, "OU=01,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR2"))
++ self.assertTrue(
++ contains(res, "OU=02,OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR2"))
++
++ try:
++ res = self.l.search(base="OU=CHECK_BASE_DN_XXXX,DC=SAMBA,DC=OR3",
++ scope=ldb.SCOPE_SUBTREE)
++ self.fail("Expected exception no thrown")
++ except ldb.LdbError as e:
++ code = e.args[0]
++ self.assertEqual(ldb.ERR_NO_SUCH_OBJECT, code)
++
++if __name__ == '__main__':
++ import unittest
++ unittest.TestProgram()
+diff --git a/lib/ldb/wscript b/lib/ldb/wscript
+index 6a204c0e42a..e14fa63ec2c 100644
+--- a/lib/ldb/wscript
++++ b/lib/ldb/wscript
+@@ -374,7 +374,7 @@ def test(ctx):
+ if not os.path.exists(tmp_dir):
+ os.mkdir(tmp_dir)
+ pyret = samba_utils.RUN_PYTHON_TESTS(
+- ['tests/python/api.py'],
++ ['tests/python/api.py', 'tests/python/index.py'],
+ extra_env={'SELFTEST_PREFIX': test_prefix})
+ print("Python testsuite returned %d" % pyret)
+
+--
+2.14.3
+
+
+From 418a6caaf3a72c1c33e767487d4f106d7b98c5ab Mon Sep 17 00:00:00 2001
+From: Andrew Bartlett <abartlet at samba.org>
+Date: Mon, 26 Mar 2018 16:01:13 +1300
+Subject: [PATCH 2/5] ldb_tdb: Ensure we can not commit an index that is
+ corrupt due to partial re-index
+
+The re-index traverse can abort part-way though and we need to ensure
+that the transaction is never committed as that will leave an un-useable db.
+
+BUG: https://bugzilla.samba.org/show_bug.cgi?id=13335
+
+Signed-off-by: Andrew Bartlett <abartlet at samba.org>
+Reviewed-by: Gary Lockyer <gary at catalyst.net.nz>
+(cherry picked from commit e481e4f30f4dc540f6f129b4f2faea48ee195673)
+---
+ lib/ldb/ldb_tdb/ldb_tdb.c | 30 ++++++++++++++++++++++++++++++
+ lib/ldb/ldb_tdb/ldb_tdb.h | 2 ++
+ 2 files changed, 32 insertions(+)
+
+diff --git a/lib/ldb/ldb_tdb/ldb_tdb.c b/lib/ldb/ldb_tdb/ldb_tdb.c
+index 16e4b8ea26e..a530a454b29 100644
+--- a/lib/ldb/ldb_tdb/ldb_tdb.c
++++ b/lib/ldb/ldb_tdb/ldb_tdb.c
+@@ -410,6 +410,10 @@ static int ltdb_modified(struct ldb_module *module, struct ldb_dn *dn)
+ ret = ltdb_cache_reload(module);
+ }
+
++ if (ret != LDB_SUCCESS) {
++ ltdb->reindex_failed = true;
++ }
++
+ return ret;
+ }
+
+@@ -1404,9 +1408,17 @@ static int ltdb_start_trans(struct ldb_module *module)
+
+ ltdb_index_transaction_start(module);
+
++ ltdb->reindex_failed = false;
++
+ return LDB_SUCCESS;
+ }
+
++/*
++ * Forward declaration to allow prepare_commit to in fact abort the
++ * transaction
++ */
++static int ltdb_del_trans(struct ldb_module *module);
++
+ static int ltdb_prepare_commit(struct ldb_module *module)
+ {
+ int ret;
+@@ -1417,6 +1429,24 @@ static int ltdb_prepare_commit(struct ldb_module *module)
+ return LDB_SUCCESS;
+ }
+
++ /*
++ * Check if the last re-index failed.
++ *
++ * This can happen if for example a duplicate value was marked
++ * unique. We must not write a partial re-index into the DB.
++ */
++ if (ltdb->reindex_failed) {
++ /*
++ * We must instead abort the transaction so we get the
++ * old values and old index back
++ */
++ ltdb_del_trans(module);
++ ldb_set_errstring(ldb_module_get_ctx(module),
++ "Failure during re-index, so "
++ "transaction must be aborted.");
++ return LDB_ERR_OPERATIONS_ERROR;
++ }
++
+ ret = ltdb_index_transaction_commit(module);
+ if (ret != LDB_SUCCESS) {
+ tdb_transaction_cancel(ltdb->tdb);
+diff --git a/lib/ldb/ldb_tdb/ldb_tdb.h b/lib/ldb/ldb_tdb/ldb_tdb.h
+index 7e182495928..9591ee59bf1 100644
+--- a/lib/ldb/ldb_tdb/ldb_tdb.h
++++ b/lib/ldb/ldb_tdb/ldb_tdb.h
+@@ -37,6 +37,8 @@ struct ltdb_private {
+
+ bool read_only;
+
++ bool reindex_failed;
++
+ const struct ldb_schema_syntax *GUID_index_syntax;
+ };
+
+--
+2.14.3
+
+
+From 34ae16944ec51ff6848d7222b58c9a88921a434c Mon Sep 17 00:00:00 2001
+From: Gary Lockyer <gary at catalyst.net.nz>
+Date: Tue, 6 Mar 2018 09:13:31 +1300
+Subject: [PATCH 3/5] lib ldb tests: Prepare to run api and index test on tdb
+ and lmdb
+
+BUG: https://bugzilla.samba.org/show_bug.cgi?id=13335
+
+Signed-off-by: Gary Lockyer <gary at catalyst.net.nz>
+Reviewed-by: Andrew Bartlett <abartlet at samba.org>
+(cherry picked from commit 06d9566ef7005588de18c5a1d07a5b9cd179d17b)
+---
+ lib/ldb/tests/python/api.py | 145 +++++++++++++++++++++++++-----------------
+ lib/ldb/tests/python/index.py | 29 ++++++++-
+ 2 files changed, 114 insertions(+), 60 deletions(-)
+
+diff --git a/lib/ldb/tests/python/api.py b/lib/ldb/tests/python/api.py
+index 409f446f1ea..eb6f4544ffb 100755
+--- a/lib/ldb/tests/python/api.py
++++ b/lib/ldb/tests/python/api.py
+@@ -12,6 +12,9 @@ import shutil
+
+ PY3 = sys.version_info > (3, 0)
+
++TDB_PREFIX = "tdb://"
++MDB_PREFIX = "mdb://"
++
+
+ def tempdir():
+ import tempfile
+@@ -44,13 +47,36 @@ class NoContextTests(TestCase):
+ encoded2 = ldb.binary_encode('test\\x')
+ self.assertEqual(encoded2, encoded)
+
+-class SimpleLdb(TestCase):
++
++class LdbBaseTest(TestCase):
++ def setUp(self):
++ super(LdbBaseTest, self).setUp()
++ try:
++ if self.prefix is None:
++ self.prefix = TDB_PREFIX
++ except AttributeError:
++ self.prefix = TDB_PREFIX
++
++ def tearDown(self):
++ super(LdbBaseTest, self).tearDown()
++
++ def url(self):
++ return self.prefix + self.filename
++
++ def flags(self):
++ if self.prefix == MDB_PREFIX:
++ return ldb.FLG_NOSYNC
++ else:
++ return 0
++
++
++class SimpleLdb(LdbBaseTest):
+
+ def setUp(self):
+ super(SimpleLdb, self).setUp()
+ self.testdir = tempdir()
+ self.filename = os.path.join(self.testdir, "test.ldb")
+- self.ldb = ldb.Ldb(self.filename)
++ self.ldb = ldb.Ldb(self.url(), flags=self.flags())
+
+ def tearDown(self):
+ shutil.rmtree(self.testdir)
+@@ -58,16 +84,15 @@ class SimpleLdb(TestCase):
+ # Ensure the LDB is closed now, so we close the FD
+ del(self.ldb)
+
+-
+ def test_connect(self):
+- ldb.Ldb(self.filename)
++ ldb.Ldb(self.url(), flags=self.flags())
+
+ def test_connect_none(self):
+ ldb.Ldb()
+
+ def test_connect_later(self):
+ x = ldb.Ldb()
+- x.connect(self.filename)
++ x.connect(self.url(), flags=self.flags())
+
+ def test_repr(self):
+ x = ldb.Ldb()
+@@ -82,7 +107,7 @@ class SimpleLdb(TestCase):
+ self.assertEqual([], x.modules())
+
+ def test_modules_tdb(self):
+- x = ldb.Ldb(self.filename)
++ x = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual("[<ldb module 'tdb'>]", repr(x.modules()))
+
+ def test_firstmodule_none(self):
+@@ -90,53 +115,53 @@ class SimpleLdb(TestCase):
+ self.assertEqual(x.firstmodule, None)
+
+ def test_firstmodule_tdb(self):
+- x = ldb.Ldb(self.filename)
++ x = ldb.Ldb(self.url(), flags=self.flags())
+ mod = x.firstmodule
+ self.assertEqual(repr(mod), "<ldb module 'tdb'>")
+
+ def test_search(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(len(l.search()), 0)
+
+ def test_search_controls(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(len(l.search(controls=["paged_results:0:5"])), 0)
+
+ def test_search_attrs(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(len(l.search(ldb.Dn(l, ""), ldb.SCOPE_SUBTREE, "(dc=*)", ["dc"])), 0)
+
+ def test_search_string_dn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(len(l.search("", ldb.SCOPE_SUBTREE, "(dc=*)", ["dc"])), 0)
+
+ def test_search_attr_string(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertRaises(TypeError, l.search, attrs="dc")
+ self.assertRaises(TypeError, l.search, attrs=b"dc")
+
+ def test_opaque(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ l.set_opaque("my_opaque", l)
+ self.assertTrue(l.get_opaque("my_opaque") is not None)
+ self.assertEqual(None, l.get_opaque("unknown"))
+
+ def test_search_scope_base_empty_db(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(len(l.search(ldb.Dn(l, "dc=foo1"),
+ ldb.SCOPE_BASE)), 0)
+
+ def test_search_scope_onelevel_empty_db(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(len(l.search(ldb.Dn(l, "dc=foo1"),
+ ldb.SCOPE_ONELEVEL)), 0)
+
+ def test_delete(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertRaises(ldb.LdbError, lambda: l.delete(ldb.Dn(l, "dc=foo2")))
+
+ def test_delete_w_unhandled_ctrl(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=foo1")
+ m["b"] = [b"a"]
+@@ -145,10 +170,10 @@ class SimpleLdb(TestCase):
+ l.delete(m.dn)
+
+ def test_contains(self):
+- name = self.filename
+- l = ldb.Ldb(name)
++ name = self.url()
++ l = ldb.Ldb(name, flags=self.flags())
+ self.assertFalse(ldb.Dn(l, "dc=foo3") in l)
+- l = ldb.Ldb(name)
++ l = ldb.Ldb(name, flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=foo3")
+ m["b"] = ["a"]
+@@ -160,23 +185,23 @@ class SimpleLdb(TestCase):
+ l.delete(m.dn)
+
+ def test_get_config_basedn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(None, l.get_config_basedn())
+
+ def test_get_root_basedn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(None, l.get_root_basedn())
+
+ def test_get_schema_basedn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(None, l.get_schema_basedn())
+
+ def test_get_default_basedn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(None, l.get_default_basedn())
+
+ def test_add(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=foo4")
+ m["bla"] = b"bla"
+@@ -188,7 +213,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=foo4"))
+
+ def test_search_iterator(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ s = l.search_iterator()
+ s.abandon()
+ try:
+@@ -288,7 +313,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=foo5"))
+
+ def test_add_text(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=foo4")
+ m["bla"] = "bla"
+@@ -300,7 +325,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=foo4"))
+
+ def test_add_w_unhandled_ctrl(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=foo4")
+ m["bla"] = b"bla"
+@@ -308,7 +333,7 @@ class SimpleLdb(TestCase):
+ self.assertRaises(ldb.LdbError, lambda: l.add(m,["search_options:1:2"]))
+
+ def test_add_dict(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = {"dn": ldb.Dn(l, "dc=foo5"),
+ "bla": b"bla"}
+ self.assertEqual(len(l.search()), 0)
+@@ -319,7 +344,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=foo5"))
+
+ def test_add_dict_text(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = {"dn": ldb.Dn(l, "dc=foo5"),
+ "bla": "bla"}
+ self.assertEqual(len(l.search()), 0)
+@@ -330,7 +355,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=foo5"))
+
+ def test_add_dict_string_dn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = {"dn": "dc=foo6", "bla": b"bla"}
+ self.assertEqual(len(l.search()), 0)
+ l.add(m)
+@@ -340,7 +365,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=foo6"))
+
+ def test_add_dict_bytes_dn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = {"dn": b"dc=foo6", "bla": b"bla"}
+ self.assertEqual(len(l.search()), 0)
+ l.add(m)
+@@ -350,7 +375,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=foo6"))
+
+ def test_rename(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=foo7")
+ m["bla"] = b"bla"
+@@ -363,7 +388,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=bar"))
+
+ def test_rename_string_dns(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=foo8")
+ m["bla"] = b"bla"
+@@ -377,7 +402,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=bar"))
+
+ def test_empty_dn(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertEqual(0, len(l.search()))
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=empty")
+@@ -394,7 +419,7 @@ class SimpleLdb(TestCase):
+ self.assertEqual(0, len(rm[0]))
+
+ def test_modify_delete(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=modifydelete")
+ m["bla"] = [b"1234"]
+@@ -417,7 +442,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=modifydelete"))
+
+ def test_modify_delete_text(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=modifydelete")
+ m.text["bla"] = ["1234"]
+@@ -440,7 +465,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=modifydelete"))
+
+ def test_modify_add(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=add")
+ m["bla"] = [b"1234"]
+@@ -458,7 +483,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=add"))
+
+ def test_modify_add_text(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=add")
+ m.text["bla"] = ["1234"]
+@@ -476,7 +501,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=add"))
+
+ def test_modify_replace(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=modify2")
+ m["bla"] = [b"1234", b"456"]
+@@ -496,7 +521,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=modify2"))
+
+ def test_modify_replace_text(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=modify2")
+ m.text["bla"] = ["1234", "456"]
+@@ -516,7 +541,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=modify2"))
+
+ def test_modify_flags_change(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=add")
+ m["bla"] = [b"1234"]
+@@ -542,7 +567,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=add"))
+
+ def test_modify_flags_change_text(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ m = ldb.Message()
+ m.dn = ldb.Dn(l, "dc=add")
+ m.text["bla"] = ["1234"]
+@@ -568,7 +593,7 @@ class SimpleLdb(TestCase):
+ l.delete(ldb.Dn(l, "dc=add"))
+
+ def test_transaction_commit(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ l.transaction_start()
+ m = ldb.Message(ldb.Dn(l, "dc=foo9"))
+ m["foo"] = [b"bar"]
+@@ -577,7 +602,7 @@ class SimpleLdb(TestCase):
+ l.delete(m.dn)
+
+ def test_transaction_cancel(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ l.transaction_start()
+ m = ldb.Message(ldb.Dn(l, "dc=foo10"))
+ m["foo"] = [b"bar"]
+@@ -588,12 +613,12 @@ class SimpleLdb(TestCase):
+ def test_set_debug(self):
+ def my_report_fn(level, text):
+ pass
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ l.set_debug(my_report_fn)
+
+ def test_zero_byte_string(self):
+ """Testing we do not get trapped in the \0 byte in a property string."""
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ l.add({
+ "dn" : b"dc=somedn",
+ "objectclass" : b"user",
+@@ -605,10 +630,10 @@ class SimpleLdb(TestCase):
+ self.assertEqual(b"foo\0bar", res[0]["displayname"][0])
+
+ def test_no_crash_broken_expr(self):
+- l = ldb.Ldb(self.filename)
++ l = ldb.Ldb(self.url(), flags=self.flags())
+ self.assertRaises(ldb.LdbError,lambda: l.search("", ldb.SCOPE_SUBTREE, "&(dc=*)(dn=*)", ["dc"]))
+
+-class SearchTests(TestCase):
++class SearchTests(LdbBaseTest):
+ def tearDown(self):
+ shutil.rmtree(self.testdir)
+ super(SearchTests, self).tearDown()
+@@ -621,7 +646,9 @@ class SearchTests(TestCase):
+ super(SearchTests, self).setUp()
+ self.testdir = tempdir()
+ self.filename = os.path.join(self.testdir, "search_test.ldb")
+- self.l = ldb.Ldb(self.filename, options=["modules:rdn_name"])
++ self.l = ldb.Ldb(self.url(),
++ flags=self.flags(),
++ options=["modules:rdn_name"])
+
+ self.l.add({"dn": "@ATTRIBUTES",
+ "DC": "CASE_INSENSITIVE"})
+@@ -1030,7 +1057,6 @@ class SearchTests(TestCase):
+ self.assertEqual(len(res11), 1)
+
+
+-
+ class IndexedSearchTests(SearchTests):
+ """Test searches using the index, to ensure the index doesn't
+ break things"""
+@@ -1091,6 +1117,7 @@ class GUIDIndexedSearchTests(SearchTests):
+ self.IDXGUID = True
+ self.IDXONE = True
+
++
+ class GUIDIndexedDNFilterSearchTests(SearchTests):
+ """Test searches using the index, to ensure the index doesn't
+ break things"""
+@@ -1126,7 +1153,7 @@ class GUIDAndOneLevelIndexedSearchTests(SearchTests):
+ self.IDXONE = True
+
+
+-class AddModifyTests(TestCase):
++class AddModifyTests(LdbBaseTest):
+ def tearDown(self):
+ shutil.rmtree(self.testdir)
+ super(AddModifyTests, self).tearDown()
+@@ -1138,7 +1165,9 @@ class AddModifyTests(TestCase):
+ super(AddModifyTests, self).setUp()
+ self.testdir = tempdir()
+ self.filename = os.path.join(self.testdir, "add_test.ldb")
+- self.l = ldb.Ldb(self.filename, options=["modules:rdn_name"])
++ self.l = ldb.Ldb(self.url(),
++ flags=self.flags(),
++ options=["modules:rdn_name"])
+ self.l.add({"dn": "DC=SAMBA,DC=ORG",
+ "name": b"samba.org",
+ "objectUUID": b"0123456789abcdef"})
+@@ -1266,6 +1295,7 @@ class AddModifyTests(TestCase):
+ "x": "z", "y": "a",
+ "objectUUID": b"0123456789abcde3"})
+
++
+ class IndexedAddModifyTests(AddModifyTests):
+ """Test searches using the index, to ensure the index doesn't
+ break things"""
+@@ -1378,7 +1408,6 @@ class TransIndexedAddModifyTests(IndexedAddModifyTests):
+ super(TransIndexedAddModifyTests, self).tearDown()
+
+
+-
+ class DnTests(TestCase):
+
+ def setUp(self):
+@@ -1985,13 +2014,13 @@ class ModuleTests(TestCase):
+ l = ldb.Ldb(self.filename)
+ self.assertEqual(["init"], ops)
+
+-class LdbResultTests(TestCase):
++class LdbResultTests(LdbBaseTest):
+
+ def setUp(self):
+ super(LdbResultTests, self).setUp()
+ self.testdir = tempdir()
+ self.filename = os.path.join(self.testdir, "test.ldb")
+- self.l = ldb.Ldb(self.filename)
++ self.l = ldb.Ldb(self.url(), flags=self.flags())
+ self.l.add({"dn": "DC=SAMBA,DC=ORG", "name": b"samba.org"})
+ self.l.add({"dn": "OU=ADMIN,DC=SAMBA,DC=ORG", "name": b"Admins"})
+ self.l.add({"dn": "OU=USERS,DC=SAMBA,DC=ORG", "name": b"Users"})
+@@ -2099,7 +2128,7 @@ class LdbResultTests(TestCase):
+ del(self.l)
+ gc.collect()
+
+- child_ldb = ldb.Ldb(self.filename)
++ child_ldb = ldb.Ldb(self.url(), flags=self.flags())
+ # start a transaction
+ child_ldb.transaction_start()
+
+@@ -2170,7 +2199,7 @@ class LdbResultTests(TestCase):
+ del(self.l)
+ gc.collect()
+
+- child_ldb = ldb.Ldb(self.filename)
++ child_ldb = ldb.Ldb(self.url(), flags=self.flags())
+ # start a transaction
+ child_ldb.transaction_start()
+
+diff --git a/lib/ldb/tests/python/index.py b/lib/ldb/tests/python/index.py
+index cd3735b5625..d8a84f26b4c 100755
+--- a/lib/ldb/tests/python/index.py
++++ b/lib/ldb/tests/python/index.py
+@@ -37,6 +37,9 @@ import shutil
+
+ PY3 = sys.version_info > (3, 0)
+
++TDB_PREFIX = "tdb://"
++MDB_PREFIX = "mdb://"
++
+
+ def tempdir():
+ import tempfile
+@@ -57,7 +60,29 @@ def contains(result, dn):
+ return False
+
+
+-class MaxIndexKeyLengthTests(TestCase):
++class LdbBaseTest(TestCase):
++ def setUp(self):
++ super(LdbBaseTest, self).setUp()
++ try:
++ if self.prefix is None:
++ self.prefix = TDB_PREFIX
++ except AttributeError:
++ self.prefix = TDB_PREFIX
++
++ def tearDown(self):
++ super(LdbBaseTest, self).tearDown()
++
++ def url(self):
++ return self.prefix + self.filename
++
++ def flags(self):
++ if self.prefix == MDB_PREFIX:
++ return ldb.FLG_NOSYNC
++ else:
++ return 0
++
++
++class MaxIndexKeyLengthTests(LdbBaseTest):
+ def checkGuids(self, key, guids):
+ #
+ # This check relies on the current implementation where the indexes
+@@ -94,7 +119,7 @@ class MaxIndexKeyLengthTests(TestCase):
+ self.testdir = tempdir()
+ self.filename = os.path.join(self.testdir, "key_len_test.ldb")
+ # Note that the maximum key length is set to 50
+- self.l = ldb.Ldb(self.filename,
++ self.l = ldb.Ldb(self.url(),
+ options=[
+ "modules:rdn_name",
+ "max_key_len_for_self_test:50"])
+--
+2.14.3
+
+
+From e32ade946bfceb81bd0671b1af698ffd13d4c40b Mon Sep 17 00:00:00 2001
+From: Andrew Bartlett <abartlet at samba.org>
+Date: Mon, 26 Mar 2018 16:07:45 +1300
+Subject: [PATCH 4/5] ldb: Add test to show a reindex failure must not leave
+ the DB corrupt
+
+BUG: https://bugzilla.samba.org/show_bug.cgi?id=13335
+
+Signed-off-by: Andrew Bartlett <abartlet at samba.org>
+Reviewed-by: Gary Lockyer <gary at catalyst.net.nz>
+
+Autobuild-User(master): Andrew Bartlett <abartlet at samba.org>
+Autobuild-Date(master): Thu Apr 5 07:53:10 CEST 2018 on sn-devel-144
+
+(cherry picked from commit 653a0a1ba932fc0cc567253f3e153b2928505ba2)
+---
+ lib/ldb/tests/python/api.py | 160 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 160 insertions(+)
+
+diff --git a/lib/ldb/tests/python/api.py b/lib/ldb/tests/python/api.py
+index eb6f4544ffb..1167517fd5c 100755
+--- a/lib/ldb/tests/python/api.py
++++ b/lib/ldb/tests/python/api.py
+@@ -1408,6 +1408,166 @@ class TransIndexedAddModifyTests(IndexedAddModifyTests):
+ super(TransIndexedAddModifyTests, self).tearDown()
+
+
++class BadIndexTests(LdbBaseTest):
++ def setUp(self):
++ super(BadIndexTests, self).setUp()
++ self.testdir = tempdir()
++ self.filename = os.path.join(self.testdir, "test.ldb")
++ self.ldb = ldb.Ldb(self.url(), flags=self.flags())
++ if hasattr(self, 'IDXGUID'):
++ self.ldb.add({"dn": "@INDEXLIST",
++ "@IDXATTR": [b"x", b"y", b"ou"],
++ "@IDXGUID": [b"objectUUID"],
++ "@IDX_DN_GUID": [b"GUID"]})
++ else:
++ self.ldb.add({"dn": "@INDEXLIST",
++ "@IDXATTR": [b"x", b"y", b"ou"]})
++
++ super(BadIndexTests, self).setUp()
++
++ def test_unique(self):
++ self.ldb.add({"dn": "x=x,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde1",
++ "y": "1"})
++ self.ldb.add({"dn": "x=y,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde2",
++ "y": "1"})
++ self.ldb.add({"dn": "x=z,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde3",
++ "y": "1"})
++
++ res = self.ldb.search(expression="(y=1)",
++ base="dc=samba,dc=org")
++ self.assertEquals(len(res), 3)
++
++ # Now set this to unique index, but forget to check the result
++ try:
++ self.ldb.add({"dn": "@ATTRIBUTES",
++ "y": "UNIQUE_INDEX"})
++ self.fail()
++ except ldb.LdbError:
++ pass
++
++ # We must still have a working index
++ res = self.ldb.search(expression="(y=1)",
++ base="dc=samba,dc=org")
++ self.assertEquals(len(res), 3)
++
++ def test_unique_transaction(self):
++ self.ldb.add({"dn": "x=x,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde1",
++ "y": "1"})
++ self.ldb.add({"dn": "x=y,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde2",
++ "y": "1"})
++ self.ldb.add({"dn": "x=z,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde3",
++ "y": "1"})
++
++ res = self.ldb.search(expression="(y=1)",
++ base="dc=samba,dc=org")
++ self.assertEquals(len(res), 3)
++
++ self.ldb.transaction_start()
++
++ # Now set this to unique index, but forget to check the result
++ try:
++ self.ldb.add({"dn": "@ATTRIBUTES",
++ "y": "UNIQUE_INDEX"})
++ except ldb.LdbError:
++ pass
++
++ try:
++ self.ldb.transaction_commit()
++ self.fail()
++
++ except ldb.LdbError as err:
++ enum = err.args[0]
++ self.assertEqual(enum, ldb.ERR_OPERATIONS_ERROR)
++
++ # We must still have a working index
++ res = self.ldb.search(expression="(y=1)",
++ base="dc=samba,dc=org")
++
++ self.assertEquals(len(res), 3)
++
++ def test_casefold(self):
++ self.ldb.add({"dn": "x=x,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde1",
++ "y": "a"})
++ self.ldb.add({"dn": "x=y,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde2",
++ "y": "A"})
++ self.ldb.add({"dn": "x=z,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde3",
++ "y": ["a", "A"]})
++
++ res = self.ldb.search(expression="(y=a)",
++ base="dc=samba,dc=org")
++ self.assertEquals(len(res), 2)
++
++ self.ldb.add({"dn": "@ATTRIBUTES",
++ "y": "CASE_INSENSITIVE"})
++
++ # We must still have a working index
++ res = self.ldb.search(expression="(y=a)",
++ base="dc=samba,dc=org")
++
++ if hasattr(self, 'IDXGUID'):
++ self.assertEquals(len(res), 3)
++ else:
++ # We should not return this entry twice, but sadly
++ # we have not yet fixed
++ # https://bugzilla.samba.org/show_bug.cgi?id=13361
++ self.assertEquals(len(res), 4)
++
++ def test_casefold_transaction(self):
++ self.ldb.add({"dn": "x=x,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde1",
++ "y": "a"})
++ self.ldb.add({"dn": "x=y,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde2",
++ "y": "A"})
++ self.ldb.add({"dn": "x=z,dc=samba,dc=org",
++ "objectUUID": b"0123456789abcde3",
++ "y": ["a", "A"]})
++
++ res = self.ldb.search(expression="(y=a)",
++ base="dc=samba,dc=org")
++ self.assertEquals(len(res), 2)
++
++ self.ldb.transaction_start()
++
++ self.ldb.add({"dn": "@ATTRIBUTES",
++ "y": "CASE_INSENSITIVE"})
++
++ self.ldb.transaction_commit()
++
++ # We must still have a working index
++ res = self.ldb.search(expression="(y=a)",
++ base="dc=samba,dc=org")
++
++ if hasattr(self, 'IDXGUID'):
++ self.assertEquals(len(res), 3)
++ else:
++ # We should not return this entry twice, but sadly
++ # we have not yet fixed
++ # https://bugzilla.samba.org/show_bug.cgi?id=13361
++ self.assertEquals(len(res), 4)
++
++
++ def tearDown(self):
++ super(BadIndexTests, self).tearDown()
++
++
++class GUIDBadIndexTests(BadIndexTests):
++ """Test Bad index things with GUID index mode"""
++ def setUp(self):
++ self.IDXGUID = True
++
++ super(GUIDBadIndexTests, self).setUp()
++
++
+ class DnTests(TestCase):
+
+ def setUp(self):
+--
+2.14.3
+
+
+From 7fef1bb5b2f8c126430c72b42a595552cc1fd48f Mon Sep 17 00:00:00 2001
+From: Gary Lockyer <gary at catalyst.net.nz>
+Date: Wed, 28 Feb 2018 11:47:22 +1300
+Subject: [PATCH 5/5] ldb_tdb: Do not fail in GUID index mode if there is a
+ duplicate attribute
+
+It is not the job of the index code to enforce this, but do give a
+a warning given it has been detected.
+
+However, now that we do allow it, we must never return the same
+object twice to the caller, so filter for it in ltdb_index_filter().
+
+The GUID list is sorted, which makes this cheap to handle, thankfully.
+
+Signed-off-by: Gary Lockyer <gary at catalyst.net.nz>
+Reviewed-by: Douglas Bagnall <douglas.bagnall at catalyst.net.nz>
+Reviewed-by: Andrew Bartlett <abartlet at samba.org>
+
+BUG: https://bugzilla.samba.org/show_bug.cgi?id=13335
+
+(cherry picked from commit 5c1504b94d1417894176811f18c5d450de22cfd2)
+---
+ lib/ldb/ldb_tdb/ldb_index.c | 64 ++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 57 insertions(+), 7 deletions(-)
+
+diff --git a/lib/ldb/ldb_tdb/ldb_index.c b/lib/ldb/ldb_tdb/ldb_index.c
+index 99fef23662f..ee2027319e3 100644
+--- a/lib/ldb/ldb_tdb/ldb_index.c
++++ b/lib/ldb/ldb_tdb/ldb_index.c
+@@ -1526,6 +1526,7 @@ static int ltdb_index_filter(struct ltdb_private *ltdb,
+ struct ldb_message *msg;
+ struct ldb_message *filtered_msg;
+ unsigned int i;
++ uint8_t previous_guid_key[LTDB_GUID_KEY_SIZE] = {};
+
+ ldb = ldb_module_get_ctx(ac->module);
+
+@@ -1538,11 +1539,6 @@ static int ltdb_index_filter(struct ltdb_private *ltdb,
+ int ret;
+ bool matched;
+
+- msg = ldb_msg_new(ac);
+- if (!msg) {
+- return LDB_ERR_OPERATIONS_ERROR;
+- }
+-
+ ret = ltdb_idx_to_key(ac->module, ltdb,
+ ac, &dn_list->dn[i],
+ &tdb_key);
+@@ -1550,6 +1546,33 @@ static int ltdb_index_filter(struct ltdb_private *ltdb,
+ return ret;
+ }
+
++ if (ltdb->cache->GUID_index_attribute != NULL) {
++ /*
++ * If we are in GUID index mode, then the dn_list is
++ * sorted. If we got a duplicate, forget about it, as
++ * otherwise we would send the same entry back more
++ * than once.
++ *
++ * This is needed in the truncated DN case, or if a
++ * duplicate was forced in via
++ * LDB_FLAG_INTERNAL_DISABLE_SINGLE_VALUE_CHECK
++ */
++
++ if (memcmp(previous_guid_key, tdb_key.dptr,
++ sizeof(previous_guid_key)) == 0) {
++ continue;
++ }
++
++ memcpy(previous_guid_key, tdb_key.dptr,
++ sizeof(previous_guid_key));
++ }
++
++ msg = ldb_msg_new(ac);
++ if (!msg) {
++ return LDB_ERR_OPERATIONS_ERROR;
++ }
++
++
+ ret = ltdb_search_key(ac->module, ltdb,
+ tdb_key, msg,
+ LDB_UNPACK_DATA_FLAG_NO_DATA_ALLOC|
+@@ -1923,9 +1946,36 @@ static int ltdb_index_add1(struct ldb_module *module,
+ BINARY_ARRAY_SEARCH_GTE(list->dn, list->count,
+ *key_val, ldb_val_equal_exact_ordered,
+ exact, next);
++
++ /*
++ * Give a warning rather than fail, this could be a
++ * duplicate value in the record allowed by a caller
++ * forcing in the value with
++ * LDB_FLAG_INTERNAL_DISABLE_SINGLE_VALUE_CHECK
++ */
+ if (exact != NULL) {
+- talloc_free(list);
+- return LDB_ERR_OPERATIONS_ERROR;
++ /* This can't fail, gives a default at worst */
++ const struct ldb_schema_attribute *attr
++ = ldb_schema_attribute_by_name(
++ ldb,
++ ltdb->cache->GUID_index_attribute);
++ struct ldb_val v;
++ ret = attr->syntax->ldif_write_fn(ldb, list,
++ exact, &v);
++ if (ret == LDB_SUCCESS) {
++ ldb_debug(ldb, LDB_DEBUG_WARNING,
++ __location__
++ ": duplicate attribute value in %s "
++ "for index on %s, "
++ "duplicate of %s %*.*s in %s",
++ ldb_dn_get_linearized(msg->dn),
++ el->name,
++ ltdb->cache->GUID_index_attribute,
++ (int)v.length,
++ (int)v.length,
++ v.data,
++ ldb_dn_get_linearized(dn_key));
++ }
+ }
+
+ if (next == NULL) {
+--
+2.14.3
+
Copied: samba/repos/testing-x86_64/nmbd.service (from rev 323030, samba/trunk/nmbd.service)
===================================================================
--- testing-x86_64/nmbd.service (rev 0)
+++ testing-x86_64/nmbd.service 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,12 @@
+[Unit]
+Description=Samba NetBIOS name server
+After=network.target
+
+[Service]
+Type=forking
+PIDFile=/var/run/nmbd.pid
+ExecStart=/usr/bin/nmbd -D
+ExecReload=/bin/kill -HUP $MAINPID
+
+[Install]
+WantedBy=multi-user.target
Copied: samba/repos/testing-x86_64/samba.conf (from rev 323030, samba/trunk/samba.conf)
===================================================================
--- testing-x86_64/samba.conf (rev 0)
+++ testing-x86_64/samba.conf 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,2 @@
+D /run/samba 0755 - - -
+d /var/log/samba 0755 - - -
\ No newline at end of file
Copied: samba/repos/testing-x86_64/samba.conf.d (from rev 323030, samba/trunk/samba.conf.d)
===================================================================
--- testing-x86_64/samba.conf.d (rev 0)
+++ testing-x86_64/samba.conf.d 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,18 @@
+## Path: Network/Samba
+## Description: Samba process options
+## Type: string
+## Default: ""
+## ServiceRestart: samba
+SAMBAOPTIONS=""
+## Type: string
+## Default: ""
+## ServiceRestart: smb
+SMBDOPTIONS=""
+## Type: string
+## Default: ""
+## ServiceRestart: nmb
+NMBDOPTIONS=""
+## Type: string
+## Default: ""
+## ServiceRestart: winbind
+WINBINDOPTIONS=""
Copied: samba/repos/testing-x86_64/samba.install (from rev 323030, samba/trunk/samba.install)
===================================================================
--- testing-x86_64/samba.install (rev 0)
+++ testing-x86_64/samba.install 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+post_upgrade() {
+ if [ "$(vercmp $2 4.0.4)" -lt 0 ]; then
+ echo "Major upgrade from samba 3.x to 4.x,"
+ echo "please read the Samba4 migration guide:"
+ echo "http://wiki.samba.org/index.php/Samba4/samba3upgrade/HOWTO"
+ fi
+}
+# vim:set ts=2 sw=2 et:
Copied: samba/repos/testing-x86_64/samba.logrotate (from rev 323030, samba/trunk/samba.logrotate)
===================================================================
--- testing-x86_64/samba.logrotate (rev 0)
+++ testing-x86_64/samba.logrotate 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,5 @@
+/var/log/samba/log.smbd /var/log/samba/log.nmbd /var/log/samba/*.log {
+ notifempty
+ missingok
+ copytruncate
+}
Copied: samba/repos/testing-x86_64/samba.pam (from rev 323030, samba/trunk/samba.pam)
===================================================================
--- testing-x86_64/samba.pam (rev 0)
+++ testing-x86_64/samba.pam 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,3 @@
+auth required pam_unix.so
+account required pam_unix.so
+session required pam_unix.so
Copied: samba/repos/testing-x86_64/samba.service (from rev 323030, samba/trunk/samba.service)
===================================================================
--- testing-x86_64/samba.service (rev 0)
+++ testing-x86_64/samba.service 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,14 @@
+[Unit]
+Description=Samba AD Daemon
+After=syslog.target network.target
+
+[Service]
+Type=forking
+PIDFile=/var/run/samba.pid
+LimitNOFILE=16384
+EnvironmentFile=-/etc/conf.d/samba
+ExecStart=/usr/bin/samba $SAMBAOPTIONS
+ExecReload=/usr/bin/kill -HUP $MAINPID
+
+[Install]
+WantedBy=multi-user.target
Copied: samba/repos/testing-x86_64/smbd.service (from rev 323030, samba/trunk/smbd.service)
===================================================================
--- testing-x86_64/smbd.service (rev 0)
+++ testing-x86_64/smbd.service 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,12 @@
+[Unit]
+Description=Samba SMB/CIFS server
+After=network.target nmbd.service winbindd.service
+
+[Service]
+Type=forking
+PIDFile=/var/run/smbd.pid
+ExecStart=/usr/bin/smbd -D
+ExecReload=/bin/kill -HUP $MAINPID
+
+[Install]
+WantedBy=multi-user.target
Copied: samba/repos/testing-x86_64/smbd.socket (from rev 323030, samba/trunk/smbd.socket)
===================================================================
--- testing-x86_64/smbd.socket (rev 0)
+++ testing-x86_64/smbd.socket 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,9 @@
+[Unit]
+Description=Samba SMB/CIFS server socket
+
+[Socket]
+ListenStream=445
+Accept=yes
+
+[Install]
+WantedBy=sockets.target
Copied: samba/repos/testing-x86_64/smbd at .service (from rev 323030, samba/trunk/smbd at .service)
===================================================================
--- testing-x86_64/smbd at .service (rev 0)
+++ testing-x86_64/smbd at .service 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,7 @@
+[Unit]
+Description=Samba SMB/CIFS server instance
+
+[Service]
+ExecStart=/usr/bin/smbd -F
+ExecReload=/bin/kill -HUP $MAINPID
+StandardInput=socket
Copied: samba/repos/testing-x86_64/winbindd.service (from rev 323030, samba/trunk/winbindd.service)
===================================================================
--- testing-x86_64/winbindd.service (rev 0)
+++ testing-x86_64/winbindd.service 2018-04-26 11:11:40 UTC (rev 323031)
@@ -0,0 +1,12 @@
+[Unit]
+Description=Samba Winbind daemon
+After=network.target nmbd.service
+
+[Service]
+Type=forking
+PIDFile=/var/run/winbindd.pid
+ExecStart=/usr/bin/winbindd -D
+ExecReload=/bin/kill -HUP $MAINPID
+
+[Install]
+WantedBy=multi-user.target
More information about the arch-commits
mailing list