/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
 * or http://forgerock.org/license/CDDLv1.0.html.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at legal-notices/CDDLv1_0.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information:
 *      Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *
 *      Copyright 2006-2010 Sun Microsystems, Inc.
 *      Portions Copyright 2011-2015 ForgeRock AS
 *      Portions copyright 2013 Manuel Gaupp
 */
package org.opends.server.backends.pluggable;

import static org.forgerock.util.Utils.*;
import static org.opends.messages.BackendMessages.*;
import static org.opends.server.backends.pluggable.DnKeyFormat.*;
import static org.opends.server.backends.pluggable.EntryIDSet.*;
import static org.opends.server.backends.pluggable.IndexFilter.*;
import static org.opends.server.backends.pluggable.VLVIndex.*;
import static org.opends.server.core.DirectoryServer.*;
import static org.opends.server.protocols.ldap.LDAPResultCode.*;
import static org.opends.server.types.AdditionalLogItem.*;
import static org.opends.server.util.StaticUtils.*;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.TreeMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.forgerock.i18n.LocalizableMessage;
import org.forgerock.i18n.LocalizableMessageBuilder;
import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.config.server.ConfigChangeResult;
import org.forgerock.opendj.config.server.ConfigException;
import org.forgerock.opendj.ldap.ByteSequence;
import org.forgerock.opendj.ldap.ByteString;
import org.forgerock.opendj.ldap.ByteStringBuilder;
import org.forgerock.opendj.ldap.ResultCode;
import org.forgerock.opendj.ldap.SearchScope;
import org.opends.messages.CoreMessages;
import org.opends.server.admin.server.ConfigurationAddListener;
import org.opends.server.admin.server.ConfigurationChangeListener;
import org.opends.server.admin.server.ConfigurationDeleteListener;
import org.opends.server.admin.std.server.BackendIndexCfg;
import org.opends.server.admin.std.server.BackendVLVIndexCfg;
import org.opends.server.admin.std.server.PluggableBackendCfg;
import org.opends.server.api.ClientConnection;
import org.opends.server.api.EntryCache;
import org.opends.server.api.VirtualAttributeProvider;
import org.opends.server.api.plugin.PluginResult.SubordinateDelete;
import org.opends.server.api.plugin.PluginResult.SubordinateModifyDN;
import org.opends.server.backends.pluggable.spi.AccessMode;
import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadOperation;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
import org.opends.server.backends.pluggable.spi.SequentialCursor;
import org.opends.server.backends.pluggable.spi.Storage;
import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
import org.opends.server.backends.pluggable.spi.TreeName;
import org.opends.server.backends.pluggable.spi.WriteOperation;
import org.opends.server.backends.pluggable.spi.WriteableTransaction;
import org.opends.server.controls.PagedResultsControl;
import org.opends.server.controls.ServerSideSortRequestControl;
import org.opends.server.controls.ServerSideSortResponseControl;
import org.opends.server.controls.SubtreeDeleteControl;
import org.opends.server.controls.VLVRequestControl;
import org.opends.server.controls.VLVResponseControl;
import org.opends.server.core.AddOperation;
import org.opends.server.core.DeleteOperation;
import org.opends.server.core.DirectoryServer;
import org.opends.server.core.ModifyDNOperation;
import org.opends.server.core.ModifyOperation;
import org.opends.server.core.SearchOperation;
import org.opends.server.protocols.ldap.LDAPResultCode;
import org.opends.server.types.Attribute;
import org.opends.server.types.AttributeType;
import org.opends.server.types.Attributes;
import org.opends.server.types.CanceledOperationException;
import org.opends.server.types.Control;
import org.opends.server.types.DN;
import org.opends.server.types.DirectoryException;
import org.opends.server.types.Entry;
import org.opends.server.types.Modification;
import org.opends.server.types.Operation;
import org.opends.server.types.Privilege;
import org.opends.server.types.RDN;
import org.opends.server.types.SearchFilter;
import org.opends.server.types.SortKey;
import org.opends.server.types.SortOrder;
import org.opends.server.types.VirtualAttributeRule;
import org.opends.server.util.ServerConstants;
import org.opends.server.util.StaticUtils;

/**
 * Storage container for LDAP entries. Each base DN of a backend is given
 * its own entry container. The entry container is the object that implements
 * the guts of the backend API methods for LDAP operations.
 */
public class EntryContainer
    implements SuffixContainer, ConfigurationChangeListener<PluggableBackendCfg>
{
  private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();

  /** Number of entry IDs to consider when building an EntryIDSet from DN2ID. */
  private static final int SCOPE_IDSET_LIMIT = 4096;
  /** The name of the entry tree. */
  private static final String ID2ENTRY_TREE_NAME = ID2ENTRY_INDEX_NAME;
  /** The name of the DN tree. */
  private static final String DN2ID_TREE_NAME = DN2ID_INDEX_NAME;
  /** The name of the children index tree. */
  private static final String ID2CHILDREN_COUNT_TREE_NAME = ID2CHILDREN_COUNT_NAME;
  /** The name of the referral tree. */
  private static final String REFERRAL_TREE_NAME = REFERRAL_INDEX_NAME;
  /** The name of the state tree. */
  private static final String STATE_TREE_NAME = STATE_INDEX_NAME;

  /** The attribute index configuration manager. */
  private final AttributeIndexCfgManager attributeIndexCfgManager;
  /** The VLV index configuration manager. */
  private final VLVIndexCfgManager vlvIndexCfgManager;

  /** ID of the backend to which this entry container belongs. */
  private final String backendID;

  /** The root container to which this entry container belongs. */
  private final RootContainer rootContainer;

  /** The baseDN this entry container is responsible for. */
  private final DN baseDN;

  /** The backend configuration. */
  private PluggableBackendCfg config;

  /** The tree storage. */
  private final Storage storage;

  /** The DN tree maps a normalized DN string to an entry ID (8 bytes). */
  private final DN2ID dn2id;
  /** The entry tree maps an entry ID (8 bytes) to a complete encoded entry. */
  private ID2Entry id2entry;
  /** Stores the number of children for each entry. */
  private final ID2Count id2childrenCount;
  /** The referral tree maps a normalized DN string to labeled URIs. */
  private final DN2URI dn2uri;
  /** The state tree maps a config DN to config entries. */
  private final State state;

  /** The set of attribute indexes. */
  private final Map<AttributeType, AttributeIndex> attrIndexMap = new HashMap<>();

  /** The set of VLV (Virtual List View) indexes. */
  private final Map<String, VLVIndex> vlvIndexMap = new HashMap<>();

  /**
   * Prevents name clashes for common indexes (like id2entry) across multiple suffixes,
   * for example when a root container contains multiple suffixes.
   */
  private final String treePrefix;

  /**
   * This class is responsible for managing the configuration for attribute
   * indexes used within this entry container.
   */
  private class AttributeIndexCfgManager implements
    ConfigurationAddListener<BackendIndexCfg>,
    ConfigurationDeleteListener<BackendIndexCfg>
  {
    @Override
    public boolean isConfigurationAddAcceptable(final BackendIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      try
      {
        new AttributeIndex(cfg, state, EntryContainer.this);
        return true;
      }
      catch(Exception e)
      {
        unacceptableReasons.add(LocalizableMessage.raw(e.getLocalizedMessage()));
        return false;
      }
    }

    @Override
    public ConfigChangeResult applyConfigurationAdd(final BackendIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();
      try
      {
        final AttributeIndex index = new AttributeIndex(cfg, state, EntryContainer.this);
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            index.open(txn, true);
            if (!index.isTrusted())
            {
              ccr.setAdminActionRequired(true);
              ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(cfg.getAttribute().getNameOrOID()));
            }
            attrIndexMap.put(cfg.getAttribute(), index);
          }
        });
      }
      catch(Exception e)
      {
        ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(e.getLocalizedMessage()));
      }
      return ccr;
    }

    @Override
    public boolean isConfigurationDeleteAcceptable(
        BackendIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      // TODO: validate more before returning true?
      return true;
    }

    @Override
    public ConfigChangeResult applyConfigurationDelete(final BackendIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();

      exclusiveLock.lock();
      try
      {
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            attrIndexMap.remove(cfg.getAttribute()).closeAndDelete(txn);
          }
        });
      }
      catch (Exception de)
      {
        ccr.setResultCode(getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(de)));
      }
      finally
      {
        exclusiveLock.unlock();
      }

      return ccr;
    }
  }

  /**
   * This class is responsible for managing the configuration for VLV indexes
   * used within this entry container.
   */
  private class VLVIndexCfgManager implements
    ConfigurationAddListener<BackendVLVIndexCfg>,
    ConfigurationDeleteListener<BackendVLVIndexCfg>
  {
    @Override
    public boolean isConfigurationAddAcceptable(
        BackendVLVIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      try
      {
        SearchFilter.createFilterFromString(cfg.getFilter());
      }
      catch(Exception e)
      {
        unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_BAD_FILTER.get(
            cfg.getFilter(), cfg.getName(), e.getLocalizedMessage()));
        return false;
      }

      String[] sortAttrs = cfg.getSortOrder().split(" ");
      SortKey[] sortKeys = new SortKey[sortAttrs.length];
      boolean[] ascending = new boolean[sortAttrs.length];
      for(int i = 0; i < sortAttrs.length; i++)
      {
        try
        {
          if(sortAttrs[i].startsWith("-"))
          {
            ascending[i] = false;
            sortAttrs[i] = sortAttrs[i].substring(1);
          }
          else
          {
            ascending[i] = true;
            if(sortAttrs[i].startsWith("+"))
            {
              sortAttrs[i] = sortAttrs[i].substring(1);
            }
          }
        }
        catch(Exception e)
        {
          // Report the attribute name rather than the sort key, which is still null here.
          unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortAttrs[i], cfg.getName()));
          return false;
        }

        AttributeType attrType =
            DirectoryServer.getAttributeType(sortAttrs[i].toLowerCase());
        if(attrType == null)
        {
          unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortAttrs[i], cfg.getName()));
          return false;
        }
        sortKeys[i] = new SortKey(attrType, ascending[i]);
      }

      return true;
    }

    @Override
    public ConfigChangeResult applyConfigurationAdd(final BackendVLVIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();
      try
      {
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            VLVIndex vlvIndex = new VLVIndex(cfg, state, storage, EntryContainer.this, txn);
            vlvIndex.open(txn, true);
            if(!vlvIndex.isTrusted())
            {
              ccr.setAdminActionRequired(true);
              ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(cfg.getName()));
            }
            vlvIndexMap.put(cfg.getName().toLowerCase(), vlvIndex);
          }
        });
      }
      catch(Exception e)
      {
        ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(e)));
      }
      return ccr;
    }

    @Override
    public boolean isConfigurationDeleteAcceptable(BackendVLVIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      // TODO: validate more before returning true?
      return true;
    }

    @Override
    public ConfigChangeResult applyConfigurationDelete(final BackendVLVIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();
      exclusiveLock.lock();
      try
      {
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            vlvIndexMap.remove(cfg.getName().toLowerCase()).closeAndDelete(txn);
          }
        });
      }
      catch (Exception e)
      {
        ccr.setResultCode(getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(e)));
      }
      finally
      {
        exclusiveLock.unlock();
      }
      return ccr;
    }
  }

  /** A read write lock to handle schema changes and bulk changes. */
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  final Lock sharedLock = lock.readLock();
  final Lock exclusiveLock = lock.writeLock();

  /**
   * Create a new entry container object.
   *
   * @param baseDN  The baseDN this entry container will be responsible for
   *                storing on disk.
   * @param backendID  ID of the backend that is creating this entry container.
   *                   It is needed by the Directory Server entry cache methods.
   * @param config  The configuration of the backend.
   * @param storage  The storage for this entryContainer.
   * @param rootContainer  The root container this entry container is in.
   * @throws ConfigException if a configuration related error occurs.
   */
  EntryContainer(DN baseDN, String backendID, PluggableBackendCfg config, Storage storage,
      RootContainer rootContainer) throws ConfigException
  {
    this.backendID = backendID;
    this.baseDN = baseDN;
    this.config = config;
    this.storage = storage;
    this.rootContainer = rootContainer;
    this.treePrefix = baseDN.toNormalizedUrlSafeString();
    this.id2childrenCount = new ID2Count(getIndexName(ID2CHILDREN_COUNT_TREE_NAME));
    this.dn2id = new DN2ID(getIndexName(DN2ID_TREE_NAME), baseDN);
    this.dn2uri = new DN2URI(getIndexName(REFERRAL_TREE_NAME), this);
    this.state = new State(getIndexName(STATE_TREE_NAME));

    config.addPluggableChangeListener(this);

    attributeIndexCfgManager = new AttributeIndexCfgManager();
    config.addBackendIndexAddListener(attributeIndexCfgManager);
    config.addBackendIndexDeleteListener(attributeIndexCfgManager);

    vlvIndexCfgManager = new VLVIndexCfgManager();
    config.addBackendVLVIndexAddListener(vlvIndexCfgManager);
    config.addBackendVLVIndexDeleteListener(vlvIndexCfgManager);
  }

  private TreeName getIndexName(String indexId)
  {
    return new TreeName(treePrefix, indexId);
  }
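
  // Note: each suffix stores its trees under a prefix derived from its normalized base DN,
  // so two suffixes sharing one root container never collide on the common tree names above.
  // As an illustrative sketch only, for a hypothetical base DN "dc=example,dc=com" the entry
  // tree would be addressed roughly as:
  //
  //   TreeName example = new TreeName("dc=example,dc=com", ID2ENTRY_TREE_NAME);
  //
  // The exact prefix text comes from DN.toNormalizedUrlSafeString() and may differ from this sketch.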

  /**
   * Opens the entryContainer for reading and writing.
   *
   * @param txn a non null transaction
   * @param accessMode specifies how the container has to be opened (read-write or read-only)
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws ConfigException if a configuration related error occurs.
   */
  void open(WriteableTransaction txn, AccessMode accessMode) throws StorageRuntimeException, ConfigException
  {
    boolean shouldCreate = accessMode.isWriteable();
    try
    {
      DataConfig entryDataConfig =
          new DataConfig(config.isEntriesCompressed(),
              config.isCompactEncoding(),
              rootContainer.getCompressedSchema());

      id2entry = new ID2Entry(getIndexName(ID2ENTRY_TREE_NAME), entryDataConfig);
      id2entry.open(txn, shouldCreate);
      id2childrenCount.open(txn, shouldCreate);
      dn2id.open(txn, shouldCreate);
      state.open(txn, shouldCreate);
      dn2uri.open(txn, shouldCreate);

      for (String idx : config.listBackendIndexes())
      {
        BackendIndexCfg indexCfg = config.getBackendIndex(idx);

        final AttributeIndex index = new AttributeIndex(indexCfg, state, this);
        index.open(txn, shouldCreate);
        if(!index.isTrusted())
        {
          logger.info(NOTE_INDEX_ADD_REQUIRES_REBUILD, index.getName());
        }
        attrIndexMap.put(indexCfg.getAttribute(), index);
      }

      for (String idx : config.listBackendVLVIndexes())
      {
        BackendVLVIndexCfg vlvIndexCfg = config.getBackendVLVIndex(idx);

        VLVIndex vlvIndex = new VLVIndex(vlvIndexCfg, state, storage, this, txn);
        vlvIndex.open(txn, shouldCreate);
        if(!vlvIndex.isTrusted())
        {
          logger.info(NOTE_INDEX_ADD_REQUIRES_REBUILD, vlvIndex.getName());
        }

        vlvIndexMap.put(vlvIndexCfg.getName().toLowerCase(), vlvIndex);
      }
    }
    catch (StorageRuntimeException de)
    {
      logger.traceException(de);
      close();
      throw de;
    }
  }

  /**
   * Closes the entry container.
   *
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  @Override
  public void close() throws StorageRuntimeException
  {
    closeSilently(attrIndexMap.values());
    closeSilently(vlvIndexMap.values());

    // Deregister any listeners.
    config.removePluggableChangeListener(this);
    config.removeBackendIndexAddListener(attributeIndexCfgManager);
    config.removeBackendIndexDeleteListener(attributeIndexCfgManager);
    config.removeBackendVLVIndexAddListener(vlvIndexCfgManager);
    config.removeBackendVLVIndexDeleteListener(vlvIndexCfgManager);
  }

  /**
   * Retrieves a reference to the root container in which this entry container
   * exists.
   *
   * @return A reference to the root container in which this entry container
   *         exists.
   */
  RootContainer getRootContainer()
  {
    return rootContainer;
  }

  /**
   * Get the DN tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The DN tree.
   */
  DN2ID getDN2ID()
  {
    return dn2id;
  }

  /**
   * Get the entry tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The entry tree.
   */
  ID2Entry getID2Entry()
  {
    return id2entry;
  }

  /**
   * Get the referral tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The referral tree.
   */
  DN2URI getDN2URI()
  {
    return dn2uri;
  }

  /**
   * Get the children tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The children tree.
   */
  ID2Count getID2ChildrenCount()
  {
    return id2childrenCount;
  }

  /**
   * Look for an attribute index for the given attribute type.
   *
   * @param attrType The attribute type for which an attribute index is needed.
   * @return The attribute index or null if there is none for that type.
   */
  AttributeIndex getAttributeIndex(AttributeType attrType)
  {
    return attrIndexMap.get(attrType);
  }

  /**
   * Look for a VLV index for the given index name.
   *
   * @param vlvIndexName The VLV index name for which a VLV index is needed.
   * @return The VLV index or null if there is none with that name.
   */
  VLVIndex getVLVIndex(String vlvIndexName)
  {
    return vlvIndexMap.get(vlvIndexName);
  }

  /**
   * Retrieve all attribute indexes.
   *
   * @return All attribute indexes defined in this entry container.
   */
  Collection<AttributeIndex> getAttributeIndexes()
  {
    return attrIndexMap.values();
  }

  /**
   * Retrieve all VLV indexes.
   *
   * @return The collection of VLV indexes defined in this entry container.
   */
  Collection<VLVIndex> getVLVIndexes()
  {
    return vlvIndexMap.values();
  }

  /**
   * Determine the highest entryID in the entryContainer.
   * The entryContainer must already be open.
   *
   * @param txn a non null transaction
   * @return The highest entry ID.
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  EntryID getHighestEntryID(ReadableTransaction txn) throws StorageRuntimeException
  {
    Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName());
    try
    {
      // Position a cursor on the last data item, and the key should give the highest ID.
      if (cursor.positionToLastKey())
      {
        return new EntryID(cursor.getKey());
      }
      return new EntryID(0);
    }
    finally
    {
      cursor.close();
    }
  }

  boolean hasSubordinates(final DN dn)
  {
    try
    {
      return storage.read(new ReadOperation<Boolean>()
      {
        @Override
        public Boolean run(final ReadableTransaction txn) throws Exception
        {
          try (final SequentialCursor<?, ?> cursor = dn2id.openChildrenCursor(txn, dn))
          {
            return cursor.next();
          }
        }
      });
    }
    catch (Exception e)
    {
      throw new StorageRuntimeException(e);
    }
  }

  /**
   * Determine the number of children entries for a given entry.
   *
   * @param entryDN The distinguished name of the entry.
   * @return The number of children entries for the given entry or -1 if
   *         the entry does not exist.
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  long getNumberOfChildren(final DN entryDN) throws StorageRuntimeException
  {
    try
    {
      return storage.read(new ReadOperation<Long>()
      {
        @Override
        public Long run(ReadableTransaction txn) throws Exception
        {
          final EntryID entryID = dn2id.get(txn, entryDN);
          return entryID != null ? id2childrenCount.getCount(txn, entryID) : -1;
        }
      });
    }
    catch (Exception e)
    {
      throw new StorageRuntimeException(e);
    }
  }

  /**
   * Processes the specified search in this entryContainer.
   * Matching entries should be provided back to the core server using the
   * <CODE>SearchOperation.returnEntry</CODE> method.
   *
   * @param searchOperation The search operation to be processed.
   * @throws DirectoryException If a problem occurs while processing the search.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void search(final SearchOperation searchOperation)
      throws DirectoryException, StorageRuntimeException, CanceledOperationException
  {
    try
    {
      storage.read(new ReadOperation<Void>()
      {
        @Override
        public Void run(final ReadableTransaction txn) throws Exception
        {
          DN aBaseDN = searchOperation.getBaseDN();
          SearchScope searchScope = searchOperation.getScope();

          PagedResultsControl pageRequest = searchOperation.getRequestControl(PagedResultsControl.DECODER);
          ServerSideSortRequestControl sortRequest =
              searchOperation.getRequestControl(ServerSideSortRequestControl.DECODER);
          if (sortRequest != null && !sortRequest.containsSortKeys() && sortRequest.isCritical())
          {
            /*
             * If the control's criticality field is true then the server SHOULD
             * do the following: return unavailableCriticalExtension as a return
             * code in the searchResultDone message; include the
             * sortKeyResponseControl in the searchResultDone message, and not
             * send back any search result entries.
             */
            searchOperation.addResponseControl(new ServerSideSortResponseControl(NO_SUCH_ATTRIBUTE, null));
            searchOperation.setResultCode(ResultCode.UNAVAILABLE_CRITICAL_EXTENSION);
            return null;
          }

          VLVRequestControl vlvRequest = searchOperation.getRequestControl(VLVRequestControl.DECODER);
          if (vlvRequest != null && pageRequest != null)
          {
            throw new DirectoryException(
                ResultCode.CONSTRAINT_VIOLATION, ERR_SEARCH_CANNOT_MIX_PAGEDRESULTS_AND_VLV.get());
          }

          // Handle client abandon of paged results.
          if (pageRequest != null)
          {
            if (pageRequest.getSize() == 0)
            {
              Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
              searchOperation.getResponseControls().add(control);
              return null;
            }
            if (searchOperation.getSizeLimit() > 0 && pageRequest.getSize() >= searchOperation.getSizeLimit())
            {
              // The RFC says: "If the page size is greater than or equal to the
              // sizeLimit value, the server should ignore the control as the
              // request can be satisfied in a single page".
              pageRequest = null;
            }
          }

          // Handle base-object search first.
          if (searchScope == SearchScope.BASE_OBJECT)
          {
            final Entry baseEntry = fetchBaseEntry(txn, aBaseDN, searchScope);
            if (!isManageDsaITOperation(searchOperation))
            {
              dn2uri.checkTargetForReferral(baseEntry, searchOperation.getScope());
            }

            if (searchOperation.getFilter().matchesEntry(baseEntry))
            {
              searchOperation.returnEntry(baseEntry, null);
            }

            if (pageRequest != null)
            {
              // Indicate no more pages.
              Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
              searchOperation.getResponseControls().add(control);
            }

            return null;
          }

          // Check whether the client requested debug information about the
          // contribution of the indexes to the search.
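          // Note: this is driven by requesting the debug search index attribute
          // (ATTR_DEBUG_SEARCH_INDEX). When present, the index evaluation trace is
          // accumulated in debugBuffer and, instead of real results, a single synthetic
          // entry is returned (see buildDebugSearchIndexEntry below, which builds
          // "cn=debugsearch"). A hypothetical example would be an ldapsearch request that
          // lists that attribute among the attributes to return.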
          StringBuilder debugBuffer = null;
          if (searchOperation.getAttributes().contains(ATTR_DEBUG_SEARCH_INDEX))
          {
            debugBuffer = new StringBuilder();
          }

          EntryIDSet entryIDSet = null;
          boolean candidatesAreInScope = false;
          if (sortRequest != null)
          {
            for (VLVIndex vlvIndex : vlvIndexMap.values())
            {
              try
              {
                entryIDSet = vlvIndex.evaluate(txn, searchOperation, sortRequest, vlvRequest, debugBuffer);
                if (entryIDSet != null)
                {
                  searchOperation.addResponseControl(new ServerSideSortResponseControl(SUCCESS, null));
                  candidatesAreInScope = true;
                  break;
                }
              }
              catch (DirectoryException de)
              {
                searchOperation.addResponseControl(new ServerSideSortResponseControl(de.getResultCode().intValue(),
                    null));

                if (sortRequest.isCritical())
                {
                  throw de;
                }
              }
            }
          }

          if (entryIDSet == null)
          {
            if (processSearchWithVirtualAttributeRule(searchOperation, true))
            {
              return null;
            }

            // Create an index filter to get the search result candidate entries.
            IndexFilter indexFilter = new IndexFilter(
                EntryContainer.this, txn, searchOperation, debugBuffer, rootContainer.getMonitorProvider());

            // Evaluate the filter against the attribute indexes.
            entryIDSet = indexFilter.evaluate();

            if (!isBelowFilterThreshold(entryIDSet))
            {
              final int lookThroughLimit = searchOperation.getClientConnection().getLookthroughLimit();
              final int idSetLimit =
                  lookThroughLimit == 0 ? SCOPE_IDSET_LIMIT : Math.min(SCOPE_IDSET_LIMIT, lookThroughLimit);

              final EntryIDSet scopeSet = getIDSetFromScope(txn, aBaseDN, searchScope, idSetLimit);
              entryIDSet.retainAll(scopeSet);
              if (debugBuffer != null)
              {
                debugBuffer.append(" scope=").append(searchScope);
                scopeSet.toString(debugBuffer);
              }
              if (scopeSet.isDefined())
              {
                // In this case we know that every candidate is in scope.
                candidatesAreInScope = true;
              }
            }

            if (sortRequest != null)
            {
              try
              {
                // If the sort key is not present, the sorting will generate the
                // default ordering. VLV search request goes through as if
                // this sort key was not found in the user entry.
                entryIDSet = sort(txn, entryIDSet, searchOperation, sortRequest.getSortOrder(), vlvRequest);
                if (sortRequest.containsSortKeys())
                {
                  searchOperation.addResponseControl(new ServerSideSortResponseControl(SUCCESS, null));
                }
                else
                {
                  /*
                   * There is no sort key associated with the sort control.
                   * Since it came here it means that the criticality is false
                   * so let the server return all search results unsorted and
                   * include the sortKeyResponseControl in the searchResultDone
                   * message.
                   */
                  searchOperation.addResponseControl(new ServerSideSortResponseControl(NO_SUCH_ATTRIBUTE, null));
                }
              }
              catch (DirectoryException de)
              {
                searchOperation.addResponseControl(new ServerSideSortResponseControl(de.getResultCode().intValue(),
                    null));

                if (sortRequest.isCritical())
                {
                  throw de;
                }
              }
            }
          }

          // If requested, construct and return a fictitious entry containing
          // debug information, and no other entries.
          if (debugBuffer != null)
          {
            debugBuffer.append(" final=");
            entryIDSet.toString(debugBuffer);

            Entry debugEntry = buildDebugSearchIndexEntry(debugBuffer);
            searchOperation.returnEntry(debugEntry, null);
            return null;
          }

          if (entryIDSet.isDefined())
          {
            rootContainer.getMonitorProvider().updateIndexedSearchCount();
            searchIndexed(txn, entryIDSet, candidatesAreInScope, searchOperation, pageRequest);
          }
          else
          {
            rootContainer.getMonitorProvider().updateUnindexedSearchCount();

            searchOperation.addAdditionalLogItem(keyOnly(getClass(), "unindexed"));

            if (processSearchWithVirtualAttributeRule(searchOperation, false))
            {
              return null;
            }

            ClientConnection clientConnection = searchOperation.getClientConnection();
            if (!clientConnection.hasPrivilege(Privilege.UNINDEXED_SEARCH, searchOperation))
            {
              throw new DirectoryException(
                  ResultCode.INSUFFICIENT_ACCESS_RIGHTS, ERR_SEARCH_UNINDEXED_INSUFFICIENT_PRIVILEGES.get());
            }

            if (sortRequest != null)
            {
              // FIXME -- Add support for sorting unindexed searches using indexes
              //          like DSEE currently does.
              searchOperation.addResponseControl(new ServerSideSortResponseControl(UNWILLING_TO_PERFORM, null));

              if (sortRequest.isCritical())
              {
                throw new DirectoryException(
                    ResultCode.UNAVAILABLE_CRITICAL_EXTENSION, ERR_SEARCH_CANNOT_SORT_UNINDEXED.get());
              }
            }

            searchNotIndexed(txn, searchOperation, pageRequest);
          }
          return null;
        }

        private EntryIDSet getIDSetFromScope(final ReadableTransaction txn, DN aBaseDN, SearchScope searchScope,
            int idSetLimit) throws DirectoryException
        {
          final EntryIDSet scopeSet;
          try
          {
            switch (searchScope.asEnum())
            {
            case BASE_OBJECT:
              try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openCursor(txn, aBaseDN))
              {
                scopeSet = EntryIDSet.newDefinedSet(scopeCursor.getValue().longValue());
              }
              break;
            case SINGLE_LEVEL:
              try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openChildrenCursor(txn, aBaseDN))
              {
                scopeSet = newIDSetFromCursor(scopeCursor, false, idSetLimit);
              }
              break;
            case SUBORDINATES:
            case WHOLE_SUBTREE:
              try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openSubordinatesCursor(txn, aBaseDN))
              {
                scopeSet = newIDSetFromCursor(scopeCursor, searchScope.equals(SearchScope.WHOLE_SUBTREE), idSetLimit);
              }
              break;
            default:
              throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
                  CoreMessages.INFO_ERROR_SEARCH_SCOPE_NOT_ALLOWED.get());
            }
          }
          catch (NoSuchElementException e)
          {
            throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, ERR_SEARCH_NO_SUCH_OBJECT.get(aBaseDN),
                getMatchedDN(txn, aBaseDN), e);
          }
          return scopeSet;
        }
      });
    }
    catch (Exception e)
    {
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }
  }
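
  /*
   * Note on EntryIDSet semantics as used above: a "defined" set enumerates concrete entry IDs,
   * while an "undefined" set means the candidate list could not be bounded, for example when the
   * cursor in newIDSetFromCursor() below would exceed idSetLimit (derived from SCOPE_IDSET_LIMIT
   * and the client's lookthrough limit). A final undefined set makes search() take the unindexed
   * path, which requires the UNINDEXED_SEARCH privilege.
   */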
  private static EntryIDSet newIDSetFromCursor(SequentialCursor<?, EntryID> cursor, boolean includeCurrent,
      int idSetLimit)
  {
    long[] entryIDs = new long[idSetLimit];
    int offset = 0;
    if (includeCurrent)
    {
      entryIDs[offset++] = cursor.getValue().longValue();
    }

    while(offset < idSetLimit && cursor.next())
    {
      entryIDs[offset++] = cursor.getValue().longValue();
    }

    if (offset == idSetLimit && cursor.next())
    {
      return EntryIDSet.newUndefinedSet();
    }
    else if (offset != idSetLimit)
    {
      entryIDs = Arrays.copyOf(entryIDs, offset);
    }
    Arrays.sort(entryIDs);

    return EntryIDSet.newDefinedSet(entryIDs);
  }

  private <E1 extends Exception, E2 extends Exception>
  void throwAllowedExceptionTypes(Exception e, Class<E1> clazz1, Class<E2> clazz2)
      throws E1, E2
  {
    throwIfPossible(e, clazz1, clazz2);
    if (e.getCause() != null)
    {
      throwIfPossible(e.getCause(), clazz1, clazz2);
    }
    else if (e instanceof StorageRuntimeException)
    {
      throw (StorageRuntimeException) e;
    }
    throw new StorageRuntimeException(e);
  }

  private static <E1 extends Exception, E2 extends Exception> void throwIfPossible(final Throwable cause,
      Class<E1> clazz1, Class<E2> clazz2) throws E1, E2
  {
    if (clazz1.isAssignableFrom(cause.getClass()))
    {
      throw clazz1.cast(cause);
    }
    else if (clazz2.isAssignableFrom(cause.getClass()))
    {
      throw clazz2.cast(cause);
    }
  }

  private static boolean processSearchWithVirtualAttributeRule(final SearchOperation searchOperation,
      boolean isPreIndexed)
  {
    for (VirtualAttributeRule rule : DirectoryServer.getVirtualAttributes())
    {
      VirtualAttributeProvider<?> provider = rule.getProvider();
      if (provider.isSearchable(rule, searchOperation, isPreIndexed))
      {
        provider.processSearch(rule, searchOperation);
        return true;
      }
    }
    return false;
  }

  private static Entry buildDebugSearchIndexEntry(StringBuilder debugBuffer) throws DirectoryException
  {
    Attribute attr = Attributes.create(ATTR_DEBUG_SEARCH_INDEX, debugBuffer.toString());
    Entry entry = new Entry(DN.valueOf("cn=debugsearch"), null, null, null);
    entry.addAttribute(attr, new ArrayList<ByteString>());
    return entry;
  }

  /**
   * We were not able to obtain a set of candidate entry IDs for the
   * search from the indexes.
   * <p>
   * Here we are relying on the DN key order to ensure children are
   * returned after their parents.
   * <ul>
   * <li>iterate through a subtree range of the DN tree
   * <li>discard non-children DNs if the search scope is single level
   * <li>fetch the entry by ID from the entry cache or the entry tree
   * <li>return the entry if it matches the filter
   * </ul>
   *
   * @param searchOperation The search operation.
   * @param pageRequest A Paged Results control, or null if none.
   * @throws DirectoryException If an error prevented the search from being
   *                            processed.
   */
  private void searchNotIndexed(ReadableTransaction txn, SearchOperation searchOperation,
      PagedResultsControl pageRequest) throws DirectoryException, CanceledOperationException
  {
    DN aBaseDN = searchOperation.getBaseDN();
    SearchScope searchScope = searchOperation.getScope();
    boolean manageDsaIT = isManageDsaITOperation(searchOperation);

    // The base entry must already have been processed if this is
    // a request for the next page in paged results. So we skip
    // the base entry processing if the cookie is set.
    if (pageRequest == null || pageRequest.getCookie().length() == 0)
    {
      final Entry baseEntry = fetchBaseEntry(txn, aBaseDN, searchScope);
      if (!manageDsaIT)
      {
        dn2uri.checkTargetForReferral(baseEntry, searchScope);
      }

      /*
       * The base entry is only included for whole subtree search.
       */
      if (searchScope == SearchScope.WHOLE_SUBTREE
          && searchOperation.getFilter().matchesEntry(baseEntry))
      {
        searchOperation.returnEntry(baseEntry, null);
      }

      if (!manageDsaIT
          && !dn2uri.returnSearchReferences(txn, searchOperation)
          && pageRequest != null)
      {
        // Indicate no more pages.
        Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
        searchOperation.getResponseControls().add(control);
      }
    }

    /*
     * We will iterate forwards through a range of the dn2id keys to
     * find subordinates of the target entry from the top of the tree
     * downwards. For example, any subordinates of "dc=example,dc=com" appear
     * in dn2id with a key ending in ",dc=example,dc=com". The entry
     * "cn=joe,ou=people,dc=example,dc=com" will appear after the entry
     * "ou=people,dc=example,dc=com".
     */
    ByteString baseDNKey = dnToDNKey(aBaseDN, this.baseDN.size());
    ByteStringBuilder suffix = beforeKey(baseDNKey);
    ByteStringBuilder end = afterKey(baseDNKey);

    // Set the starting value.
    ByteSequence begin;
    if (pageRequest != null && pageRequest.getCookie().length() != 0)
    {
      // The cookie contains the DN of the next entry to be returned.
      try
      {
        begin = ByteString.wrap(pageRequest.getCookie().toByteArray());
      }
      catch (Exception e)
      {
        logger.traceException(e);
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
            ERR_INVALID_PAGED_RESULTS_COOKIE.get(pageRequest.getCookie().toHexString()), e);
      }
    }
    else
    {
      // Set the starting value to the suffix.
      begin = suffix;
    }

    int lookthroughCount = 0;
    int lookthroughLimit = searchOperation.getClientConnection().getLookthroughLimit();

    try
    {
      final Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName());
      try
      {
        // Initialize the cursor very close to the starting value.
        boolean success = cursor.positionToKeyOrNext(begin);

        // Step forward until we pass the ending value.
        while (success && cursor.getKey().compareTo(end) < 0)
        {
          if (lookthroughLimit > 0 && lookthroughCount > lookthroughLimit)
          {
            // Lookthrough limit exceeded.
            searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
            searchOperation.appendErrorMessage(NOTE_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
            return;
          }

          // We have found a subordinate entry.
          EntryID entryID = new EntryID(cursor.getValue());
          boolean isInScope =
              searchScope != SearchScope.SINGLE_LEVEL
                  // Check if this entry is an immediate child.
                  || findDNKeyParent(cursor.getKey()) == baseDNKey.length();
          if (isInScope)
          {
            // Process the candidate entry.
            final Entry entry = getEntry(txn, entryID);
            if (entry != null)
            {
              lookthroughCount++;

              if ((manageDsaIT || entry.getReferralURLs() == null)
                  && searchOperation.getFilter().matchesEntry(entry))
              {
                if (pageRequest != null
                    && searchOperation.getEntriesSent() == pageRequest.getSize())
                {
                  // The current page is full.
                  // Set the cookie to remember where we were.
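                  // Note: on this unindexed code path the paged-results cookie is the raw
                  // dn2id key of the next candidate, so the following page can resume the
                  // cursor with positionToKeyOrNext(). The indexed path in searchIndexed()
                  // uses the entry ID instead.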
                  ByteString cookie = cursor.getKey();
                  Control control = new PagedResultsControl(pageRequest.isCritical(), 0, cookie);
                  searchOperation.getResponseControls().add(control);
                  return;
                }

                if (!searchOperation.returnEntry(entry, null))
                {
                  // We have been told to discontinue processing of the
                  // search. This could be due to size limit exceeded or
                  // operation cancelled.
                  return;
                }
              }
            }
          }

          searchOperation.checkIfCanceled(false);

          // Move to the next record.
          success = cursor.next();
        }
      }
      finally
      {
        cursor.close();
      }
    }
    catch (StorageRuntimeException e)
    {
      logger.traceException(e);
    }

    if (pageRequest != null)
    {
      // Indicate no more pages.
      Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
      searchOperation.getResponseControls().add(control);
    }
  }

  /**
   * Returns the entry corresponding to the provided entryID.
   *
   * @param txn a non null transaction
   * @param entryID the id of the entry to retrieve
   * @return the entry corresponding to the provided entryID
   * @throws DirectoryException If an error occurs retrieving the entry
   */
  private Entry getEntry(ReadableTransaction txn, EntryID entryID) throws DirectoryException
  {
    // Try the entry cache first.
    final EntryCache<?> entryCache = getEntryCache();
    final Entry cacheEntry = entryCache.getEntry(backendID, entryID.longValue());
    if (cacheEntry != null)
    {
      return cacheEntry;
    }

    final Entry entry = id2entry.get(txn, entryID);
    if (entry != null)
    {
      // Put the entry in the cache making sure not to overwrite a newer copy
      // that may have been inserted since the time we read the cache.
      entryCache.putEntryIfAbsent(entry, backendID, entryID.longValue());
    }
    return entry;
  }

  /**
   * We were able to obtain a set of candidate entry IDs for the
   * search from the indexes.
   * <p>
   * Here we are relying on ID order to ensure children are returned
   * after their parents.
   * <ul>
   * <li>Iterate through the candidate IDs
   * <li>fetch entry by ID from cache or id2entry
   * <li>put the entry in the cache if not present
   * <li>discard entries that are not in scope
   * <li>return entry if it matches the filter
   * </ul>
   *
   * @param entryIDSet The candidate entry IDs.
   * @param candidatesAreInScope true if it is certain that every candidate
   *                             entry is in the search scope.
   * @param searchOperation The search operation.
   * @param pageRequest A Paged Results control, or null if none.
   * @throws DirectoryException If an error prevented the search from being
   *                            processed.
   */
  private void searchIndexed(ReadableTransaction txn, EntryIDSet entryIDSet, boolean candidatesAreInScope,
      SearchOperation searchOperation, PagedResultsControl pageRequest) throws DirectoryException,
      CanceledOperationException
  {
    SearchScope searchScope = searchOperation.getScope();
    DN aBaseDN = searchOperation.getBaseDN();
    boolean manageDsaIT = isManageDsaITOperation(searchOperation);
    boolean continueSearch = true;

    // Set the starting value.
    EntryID begin = null;
    if (pageRequest != null && pageRequest.getCookie().length() != 0)
    {
      // The cookie contains the ID of the next entry to be returned.
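      // Note: on this indexed code path the cookie holds an entry ID, and iteration resumes
      // via entryIDSet.iterator(begin), so the next page continues from that ID within the
      // same candidate set.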
      try
      {
        begin = new EntryID(pageRequest.getCookie());
      }
      catch (Exception e)
      {
        logger.traceException(e);
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
            ERR_INVALID_PAGED_RESULTS_COOKIE.get(pageRequest.getCookie().toHexString()), e);
      }
    }
    else if (!manageDsaIT)
    {
      continueSearch = dn2uri.returnSearchReferences(txn, searchOperation);
    }

    // Make sure the candidate list is smaller than the lookthrough limit.
    int lookthroughLimit =
        searchOperation.getClientConnection().getLookthroughLimit();
    if(lookthroughLimit > 0 && entryIDSet.size() > lookthroughLimit)
    {
      // Lookthrough limit exceeded.
      searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
      searchOperation.appendErrorMessage(NOTE_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
      continueSearch = false;
    }

    // Iterate through the index candidates.
    if (continueSearch)
    {
      final SearchFilter filter = searchOperation.getFilter();
      for (Iterator<EntryID> it = entryIDSet.iterator(begin); it.hasNext();)
      {
        final EntryID id = it.next();

        Entry entry;
        try
        {
          entry = getEntry(txn, id);
        }
        catch (Exception e)
        {
          logger.traceException(e);
          continue;
        }

        // Process the candidate entry.
        if (entry != null
            && isInScope(candidatesAreInScope, searchScope, aBaseDN, entry)
            && (manageDsaIT || entry.getReferralURLs() == null)
            && filter.matchesEntry(entry))
        {
          if (pageRequest != null
              && searchOperation.getEntriesSent() == pageRequest.getSize())
          {
            // The current page is full.
            // Set the cookie to remember where we were.
            ByteString cookie = id.toByteString();
            Control control = new PagedResultsControl(pageRequest.isCritical(), 0, cookie);
            searchOperation.getResponseControls().add(control);
            return;
          }

          if (!searchOperation.returnEntry(entry, null))
          {
            // We have been told to discontinue processing of the
            // search. This could be due to size limit exceeded or
            // operation cancelled.
            break;
          }
        }
      }
      searchOperation.checkIfCanceled(false);
    }

    // Before we return success from the search we must ensure the base entry
    // exists. However, if we have returned at least one entry or subordinate
    // reference it implies the base does exist, so we can omit the check.
    if (searchOperation.getEntriesSent() == 0
        && searchOperation.getReferencesSent() == 0)
    {
      final Entry baseEntry = fetchBaseEntry(txn, aBaseDN, searchScope);
      if (!manageDsaIT)
      {
        dn2uri.checkTargetForReferral(baseEntry, searchScope);
      }
    }

    if (pageRequest != null)
    {
      // Indicate no more pages.
      Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
      searchOperation.getResponseControls().add(control);
    }
  }

  private boolean isInScope(boolean candidatesAreInScope, SearchScope searchScope, DN aBaseDN, Entry entry)
  {
    DN entryDN = entry.getName();

    if (candidatesAreInScope)
    {
      return true;
    }
    else if (searchScope == SearchScope.SINGLE_LEVEL)
    {
      // Check if this entry is an immediate child.
      if (entryDN.size() == aBaseDN.size() + 1
          && entryDN.isDescendantOf(aBaseDN))
      {
        return true;
      }
    }
    else if (searchScope == SearchScope.WHOLE_SUBTREE)
    {
      if (entryDN.isDescendantOf(aBaseDN))
      {
        return true;
      }
    }
    else if (searchScope == SearchScope.SUBORDINATES
        && entryDN.size() > aBaseDN.size()
        && entryDN.isDescendantOf(aBaseDN))
    {
      return true;
    }
    return false;
  }

  /**
   * Adds the provided entry to this tree. This method must ensure that the
   * entry is appropriate for the tree and that no entry already exists with
   * the same DN. The caller must hold a write lock on the DN of the provided
   * entry.
   *
   * @param entry The entry to add to this tree.
   * @param addOperation The add operation with which the new entry is
   *                     associated. This may be <CODE>null</CODE> for adds
   *                     performed internally.
   * @throws DirectoryException If a problem occurs while trying to add the
   *                            entry.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void addEntry(final Entry entry, final AddOperation addOperation)
      throws StorageRuntimeException, DirectoryException, CanceledOperationException
  {
    final DN parentDN = getParentWithinBase(entry.getName());
    final EntryID entryID = rootContainer.getNextEntryID();

    // Insert into the indexes, in index configuration order.
    final IndexBuffer indexBuffer = new IndexBuffer();
    indexInsertEntry(indexBuffer, entry, entryID);

    final ByteString encodedEntry = id2entry.encode(entry);

    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          try
          {
            // Check whether the entry already exists.
            if (dn2id.get(txn, entry.getName()) != null)
            {
              throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS, ERR_ADD_ENTRY_ALREADY_EXISTS.get(
                  entry.getName()));
            }

            // Check that the parent entry exists.
            EntryID parentID = null;
            if (parentDN != null)
            {
              // Check for referral entries above the target.
              dn2uri.targetEntryReferrals(txn, entry.getName(), null);

              // Read the parent ID from dn2id.
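              // Note: the parent's child count is updated just below through
              // id2childrenCount.addDelta(), i.e. as a signed delta rather than a
              // read-modify-write of the stored counter.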
              parentID = dn2id.get(txn, parentDN);
              if (parentID == null)
              {
                throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
                    ERR_ADD_NO_SUCH_OBJECT.get(entry.getName()), getMatchedDN(txn, baseDN), null);
              }
              id2childrenCount.addDelta(txn, parentID, 1);
            }

            dn2id.put(txn, entry.getName(), entryID);
            dn2uri.addEntry(txn, entry);
            id2entry.put(txn, entryID, encodedEntry);

            indexBuffer.flush(txn);

            if (addOperation != null)
            {
              // One last check before committing.
              addOperation.checkIfCanceled(true);
            }
          }
          catch (StorageRuntimeException | DirectoryException | CanceledOperationException e)
          {
            throw e;
          }
          catch (Exception e)
          {
            String msg = e.getMessage();
            if (msg == null)
            {
              msg = stackTraceToSingleLineString(e);
            }
            throw new DirectoryException(
                DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e);
          }
        }
      });
    }
    catch (Exception e)
    {
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }

    final EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      entryCache.putEntry(entry, backendID, entryID.longValue());
    }
  }

  /**
   * Removes the specified entry from this tree. This method must ensure
   * that the entry exists and that it does not have any subordinate entries
   * (unless the storage supports a subtree delete operation and the client
   * included the appropriate information in the request). The caller must hold
   * a write lock on the provided entry DN.
   *
   * @param entryDN The DN of the entry to remove from this tree.
   * @param deleteOperation The delete operation with which this action is
   *                        associated. This may be <CODE>null</CODE> for
   *                        deletes performed internally.
   * @throws DirectoryException If a problem occurs while trying to remove the
   *                            entry.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void deleteEntry(final DN entryDN, final DeleteOperation deleteOperation)
      throws DirectoryException, StorageRuntimeException, CanceledOperationException
  {
    final IndexBuffer indexBuffer = new IndexBuffer();
    final boolean isSubtreeDelete =
        deleteOperation != null && deleteOperation.getRequestControl(SubtreeDeleteControl.DECODER) != null;

    /*
     * We will iterate forwards through a range of the dn2id keys to find subordinates of the target entry from the top
     * of the tree downwards.
     */
    final ByteString entryDNKey = dnToDNKey(entryDN, baseDN.size());
    final ByteStringBuilder suffix = beforeKey(entryDNKey);
    final ByteStringBuilder end = afterKey(entryDNKey);

    final DN parentDN = getParentWithinBase(entryDN);

    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          try
          {
            // Check for referral entries above the target entry.
            dn2uri.targetEntryReferrals(txn, entryDN, null);

            int subordinateEntriesDeleted = 0;

            // Since everything under targetDN will be deleted, we only have to decrement the counter of
            // targetDN's parent. Other counters will be removed in deleteEntry().
            if (parentDN != null)
            {
              final EntryID parentID = dn2id.get(txn, parentDN);
              if (parentID == null)
              {
                throw new StorageRuntimeException(ERR_MISSING_DN2ID_RECORD.get(parentDN).toString());
              }
              id2childrenCount.addDelta(txn, parentID, -1);
            }

            Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName());
            try
            {
              // Step forward until we pass the ending value.
              boolean success = cursor.positionToKeyOrNext(suffix);
              while (success && cursor.getKey().compareTo(end) < 0)
              {
                // We have found a subordinate entry.
                if (!isSubtreeDelete)
                {
                  // The subtree delete control was not specified and
                  // the target entry is not a leaf.
                  throw new DirectoryException(ResultCode.NOT_ALLOWED_ON_NONLEAF, ERR_DELETE_NOT_ALLOWED_ON_NONLEAF
                      .get(entryDN));
                }

                /*
                 * Delete this entry which by now must be a leaf because we have
                 * been deleting from the bottom of the tree upwards.
                 */
                EntryID entryID = new EntryID(cursor.getValue());

                // Invoke any subordinate delete plugins on the entry.
                if (deleteOperation != null && !deleteOperation.isSynchronizationOperation())
                {
                  Entry subordinateEntry = id2entry.get(txn, entryID);
                  SubordinateDelete pluginResult =
                      getPluginConfigManager().invokeSubordinateDeletePlugins(deleteOperation, subordinateEntry);

                  if (!pluginResult.continueProcessing())
                  {
                    throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
                        ERR_DELETE_ABORTED_BY_SUBORDINATE_PLUGIN.get(subordinateEntry.getName()));
                  }
                }

                deleteEntry(txn, indexBuffer, true, entryDN, cursor.getKey(), entryID);
                subordinateEntriesDeleted++;

                if (deleteOperation != null)
                {
                  deleteOperation.checkIfCanceled(false);
                }

                // Get the next DN.
                success = cursor.next();
              }
            }
            finally
            {
              cursor.close();
            }

            // draft-armijo-ldap-treedelete, 4.1 Tree Delete Semantics:
            // The server MUST NOT chase referrals stored in the tree. If
            // information about referrals is stored in this section of the
            // tree, this pointer will be deleted.
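            // Note: passing null for the key and entry ID below makes the private
            // deleteEntry() helper re-read the target's ID from dn2id itself; only the
            // target entry remains at this point, its subordinates having been removed above.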
            boolean manageDsaIT = isSubtreeDelete || isManageDsaITOperation(deleteOperation);
            deleteEntry(txn, indexBuffer, manageDsaIT, entryDN, null, null);

            indexBuffer.flush(txn);

            if (deleteOperation != null)
            {
              // One last check before committing.
              deleteOperation.checkIfCanceled(true);
            }

            if (isSubtreeDelete)
            {
              deleteOperation.addAdditionalLogItem(unquotedKeyValue(getClass(), "deletedEntries",
                  subordinateEntriesDeleted + 1));
            }
          }
          catch (StorageRuntimeException | DirectoryException | CanceledOperationException e)
          {
            throw e;
          }
          catch (Exception e)
          {
            String msg = e.getMessage();
            if (msg == null)
            {
              msg = stackTraceToSingleLineString(e);
            }
            throw new DirectoryException(
                DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e);
          }
        }
      });
    }
    catch (Exception e)
    {
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }
  }

  private void deleteEntry(WriteableTransaction txn,
      IndexBuffer indexBuffer,
      boolean manageDsaIT,
      DN targetDN,
      ByteSequence leafDNKey,
      EntryID leafID)
      throws StorageRuntimeException, DirectoryException
  {
    if(leafID == null || leafDNKey == null)
    {
      // Read the entry ID from dn2id.
      if(leafDNKey == null)
      {
        leafDNKey = dnToDNKey(targetDN, baseDN.size());
      }
      // FIXME: previously this used a RMW lock - see OPENDJ-1878.
      ByteString value = txn.read(dn2id.getName(), leafDNKey);
      if (value == null)
      {
        LocalizableMessage message = ERR_DELETE_NO_SUCH_OBJECT.get(targetDN);
        DN matchedDN = getMatchedDN(txn, baseDN);
        throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, message, matchedDN, null);
      }
      leafID = new EntryID(value);
    }

    // Remove from dn2id.
    if (!txn.delete(dn2id.getName(), leafDNKey))
    {
      // Do not expect to ever come through here.
      throw new DirectoryException(
          ResultCode.NO_SUCH_OBJECT, ERR_DELETE_NO_SUCH_OBJECT.get(leafDNKey), getMatchedDN(txn, baseDN), null);
    }

    // Check that the entry exists in id2entry and read its contents.
    // FIXME: previously this used a RMW lock - see OPENDJ-1878.
    Entry entry = id2entry.get(txn, leafID);
    if (entry == null)
    {
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
          ERR_MISSING_ID2ENTRY_RECORD.get(leafID));
    }

    if (!manageDsaIT)
    {
      dn2uri.checkTargetForReferral(entry, null);
    }

    // Update the referral tree.
    dn2uri.deleteEntry(txn, entry);

    // Remove from id2entry.
    if (!id2entry.remove(txn, leafID))
    {
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
          ERR_MISSING_ID2ENTRY_RECORD.get(leafID));
    }

    // Remove from the indexes, in index config order.
    indexRemoveEntry(indexBuffer, entry, leafID);

    // Remove the children counter for this entry.
    id2childrenCount.deleteCount(txn, leafID);

    // Remove the entry from the entry cache.
    EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      entryCache.removeEntry(entry.getName());
    }
  }

  /**
   * Indicates whether an entry with the specified DN exists.
   *
   * @param entryDN The DN of the entry for which to determine existence.
1785 * 1786 * @return <CODE>true</CODE> if the specified entry exists, 1787 * or <CODE>false</CODE> if it does not. 1788 * 1789 * @throws DirectoryException If a problem occurs while trying to make the 1790 * determination. 1791 */ 1792 private boolean entryExists(ReadableTransaction txn, final DN entryDN) throws DirectoryException 1793 { 1794 // Try the entry cache first. 1795 EntryCache<?> entryCache = DirectoryServer.getEntryCache(); 1796 if (entryCache != null && entryCache.containsEntry(entryDN)) 1797 { 1798 return true; 1799 } 1800 return dn2id.get(txn, entryDN) != null; 1801 } 1802 1803 1804 boolean entryExists(final DN entryDN) throws StorageRuntimeException 1805 { 1806 final EntryCache<?> entryCache = DirectoryServer.getEntryCache(); 1807 if (entryCache != null && entryCache.containsEntry(entryDN)) 1808 { 1809 return true; 1810 } 1811 1812 try 1813 { 1814 return storage.read(new ReadOperation<Boolean>() 1815 { 1816 @Override 1817 public Boolean run(ReadableTransaction txn) throws Exception 1818 { 1819 return dn2id.get(txn, entryDN) != null; 1820 } 1821 }); 1822 } 1823 catch (Exception e) 1824 { 1825 throw new StorageRuntimeException(e); 1826 } 1827 } 1828 1829 /** 1830 * Fetch an entry by DN, trying the entry cache first, then the tree. 1831 * Retrieves the requested entry, trying the entry cache first, 1832 * then the tree. 1833 * 1834 * @param entryDN The distinguished name of the entry to retrieve. 1835 * @return The requested entry, or <CODE>null</CODE> if the entry does not 1836 * exist. 1837 * @throws DirectoryException If a problem occurs while trying to retrieve 1838 * the entry. 1839 * @throws StorageRuntimeException An error occurred during a storage operation. 1840 */ 1841 Entry getEntry(final DN entryDN) throws StorageRuntimeException, DirectoryException 1842 { 1843 try 1844 { 1845 return storage.read(new ReadOperation<Entry>() 1846 { 1847 @Override 1848 public Entry run(ReadableTransaction txn) throws Exception 1849 { 1850 return getEntry0(txn, entryDN); 1851 } 1852 }); 1853 } 1854 catch (Exception e) 1855 { 1856 // it is not very clean to specify twice the same exception but it saves me some code for now 1857 throwAllowedExceptionTypes(e, DirectoryException.class, DirectoryException.class); 1858 return null; // it can never happen 1859 } 1860 } 1861 1862 private Entry getEntry0(ReadableTransaction txn, final DN entryDN) throws StorageRuntimeException, DirectoryException 1863 { 1864 final EntryCache<?> entryCache = DirectoryServer.getEntryCache(); 1865 if (entryCache != null) 1866 { 1867 final Entry entry = entryCache.getEntry(entryDN); 1868 if (entry != null) 1869 { 1870 return entry; 1871 } 1872 } 1873 1874 try 1875 { 1876 final EntryID entryID = dn2id.get(txn, entryDN); 1877 if (entryID == null) 1878 { 1879 // The entryDN does not exist. Check for referral entries above the target entry. 1880 dn2uri.targetEntryReferrals(txn, entryDN, null); 1881 return null; 1882 } 1883 1884 final Entry entry = id2entry.get(txn, entryID); 1885 if (entry != null && entryCache != null) 1886 { 1887 /* 1888 * Put the entry in the cache making sure not to overwrite a newer copy that may have been 1889 * inserted since the time we read the cache. 
1890 */ 1891 entryCache.putEntryIfAbsent(entry, backendID, entryID.longValue()); 1892 } 1893 return entry; 1894 } 1895 catch (Exception e) 1896 { 1897 // it is not very clean to specify twice the same exception but it saves me some code for now 1898 throwAllowedExceptionTypes(e, DirectoryException.class, DirectoryException.class); 1899 return null; // unreachable 1900 } 1901 } 1902 1903 /** 1904 * The simplest case of replacing an entry in which the entry DN has 1905 * not changed. 1906 * 1907 * @param oldEntry The old contents of the entry 1908 * @param newEntry The new contents of the entry 1909 * @param modifyOperation The modify operation with which this action is 1910 * associated. This may be <CODE>null</CODE> for 1911 * modifications performed internally. 1912 * @throws StorageRuntimeException If an error occurs in the storage. 1913 * @throws DirectoryException If a Directory Server error occurs. 1914 * @throws CanceledOperationException if this operation should be cancelled. 1915 */ 1916 void replaceEntry(final Entry oldEntry, final Entry newEntry, final ModifyOperation modifyOperation) 1917 throws StorageRuntimeException, DirectoryException, CanceledOperationException 1918 { 1919 try 1920 { 1921 storage.write(new WriteOperation() 1922 { 1923 @Override 1924 public void run(WriteableTransaction txn) throws Exception 1925 { 1926 try 1927 { 1928 EntryID entryID = dn2id.get(txn, newEntry.getName()); 1929 if (entryID == null) 1930 { 1931 throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, 1932 ERR_MODIFY_NO_SUCH_OBJECT.get(newEntry.getName()), getMatchedDN(txn, baseDN), null); 1933 } 1934 1935 if (!isManageDsaITOperation(modifyOperation)) 1936 { 1937 // Check if the entry is a referral entry. 1938 dn2uri.checkTargetForReferral(oldEntry, null); 1939 } 1940 1941 // Update the referral tree. 1942 if (modifyOperation != null) 1943 { 1944 // In this case we know from the operation what the modifications were. 1945 List<Modification> mods = modifyOperation.getModifications(); 1946 dn2uri.modifyEntry(txn, oldEntry, newEntry, mods); 1947 } 1948 else 1949 { 1950 dn2uri.replaceEntry(txn, oldEntry, newEntry); 1951 } 1952 1953 // Replace id2entry. 1954 id2entry.put(txn, entryID, newEntry); 1955 1956 // Update the indexes. 1957 final IndexBuffer indexBuffer = new IndexBuffer(); 1958 if (modifyOperation != null) 1959 { 1960 // In this case we know from the operation what the modifications were. 1961 List<Modification> mods = modifyOperation.getModifications(); 1962 indexModifications(indexBuffer, oldEntry, newEntry, entryID, mods); 1963 } 1964 else 1965 { 1966 // The most optimal would be to figure out what the modifications were. 1967 indexRemoveEntry(indexBuffer, oldEntry, entryID); 1968 indexInsertEntry(indexBuffer, newEntry, entryID); 1969 } 1970 1971 indexBuffer.flush(txn); 1972 1973 if(modifyOperation != null) 1974 { 1975 // One last check before committing 1976 modifyOperation.checkIfCanceled(true); 1977 } 1978 1979 // Update the entry cache. 
            EntryCache<?> entryCache = DirectoryServer.getEntryCache();
            if (entryCache != null)
            {
              entryCache.putEntry(newEntry, backendID, entryID.longValue());
            }
          }
          catch (StorageRuntimeException | DirectoryException | CanceledOperationException e)
          {
            throw e;
          }
          catch (Exception e)
          {
            String msg = e.getMessage();
            if (msg == null)
            {
              msg = stackTraceToSingleLineString(e);
            }
            throw new DirectoryException(
                DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e);
          }
        }
      });
    }
    catch (Exception e)
    {
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }
  }

  /**
   * Moves and/or renames the provided entry in this backend, altering any
   * subordinate entries as necessary. This must ensure that an entry already
   * exists with the provided current DN, and that no entry exists with the
   * target DN of the provided entry. The caller must hold write locks on both
   * the current DN and the new DN for the entry.
   *
   * @param currentDN The current DN of the entry to be renamed.
   * @param entry The new content to use for the entry.
   * @param modifyDNOperation The modify DN operation with which this action
   *                          is associated. This may be <CODE>null</CODE>
   *                          for modify DN operations performed internally.
   * @throws DirectoryException
   *           If a problem occurs while trying to perform the rename.
   * @throws CanceledOperationException
   *           If this backend noticed and reacted to a request to cancel or
   *           abandon the modify DN operation.
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  void renameEntry(final DN currentDN, final Entry entry, final ModifyDNOperation modifyDNOperation)
      throws StorageRuntimeException, DirectoryException, CanceledOperationException
  {
    // FIXME: consistency + isolation cannot be maintained lock free - see OPENDJ-1878.
    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          DN oldSuperiorDN = getParentWithinBase(currentDN);
          DN newSuperiorDN = getParentWithinBase(entry.getName());

          final boolean isApexEntryMoved;
          if (oldSuperiorDN != null)
          {
            isApexEntryMoved = !oldSuperiorDN.equals(newSuperiorDN);
          }
          else if (newSuperiorDN != null)
          {
            isApexEntryMoved = !newSuperiorDN.equals(oldSuperiorDN);
          }
          else
          {
            isApexEntryMoved = false;
          }

          final IndexBuffer buffer = new IndexBuffer();

          try
          {
            // Check whether the renamed entry already exists.
            if (!currentDN.equals(entry.getName()) && dn2id.get(txn, entry.getName()) != null)
            {
              LocalizableMessage message = ERR_MODIFYDN_ALREADY_EXISTS.get(entry.getName());
              throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS, message);
            }

            EntryID oldApexID = dn2id.get(txn, currentDN);
            if (oldApexID == null)
            {
              // Check for referral entries above the target entry.
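              // targetEntryReferrals() throws a referral result if an ancestor referral entry is found;
              // otherwise execution falls through to the NO_SUCH_OBJECT error below.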
              dn2uri.targetEntryReferrals(txn, currentDN, null);

              throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
                  ERR_MODIFYDN_NO_SUCH_OBJECT.get(currentDN), getMatchedDN(txn, baseDN), null);
            }

            Entry oldApexEntry = id2entry.get(txn, oldApexID);
            if (oldApexEntry == null)
            {
              throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_MISSING_ID2ENTRY_RECORD
                  .get(oldApexID));
            }

            if (!isManageDsaITOperation(modifyDNOperation))
            {
              dn2uri.checkTargetForReferral(oldApexEntry, null);
            }

            EntryID newApexID = oldApexID;
            if (newSuperiorDN != null && isApexEntryMoved)
            {
              /*
               * We want to preserve the invariant that the ID of an entry is
               * greater than its parent, since search results are returned in
               * ID order.
               */
              EntryID newSuperiorID = dn2id.get(txn, newSuperiorDN);
              if (newSuperiorID == null)
              {
                throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
                    ERR_NEW_SUPERIOR_NO_SUCH_OBJECT.get(newSuperiorDN), getMatchedDN(txn, baseDN), null);
              }

              if (newSuperiorID.compareTo(oldApexID) > 0)
              {
                // This move would break the above invariant so we must
                // renumber every entry that moves. This is even more
                // expensive since every entry has to be deleted from
                // and added back into the attribute indexes.
                newApexID = rootContainer.getNextEntryID();

                if (logger.isTraceEnabled())
                {
                  logger.trace("Move of target entry requires renumbering all entries in the subtree. "
                      + "Old DN: %s New DN: %s Old entry ID: %d New entry ID: %d New Superior ID: %d",
                      oldApexEntry.getName(), entry.getName(), oldApexID, newApexID, newSuperiorID);
                }
              }
            }

            MovedEntry head = new MovedEntry(null, null, false);
            MovedEntry current = head;
            // Move or rename the apex entry.
            removeApexEntry(txn, buffer, oldSuperiorDN, oldApexID, newApexID, oldApexEntry, entry, isApexEntryMoved,
                modifyDNOperation, current);
            current = current.next;

            /*
             * We will iterate forwards through a range of the dn2id keys to
             * find subordinates of the target entry from the top of the tree
             * downwards.
             */
            ByteString currentDNKey = dnToDNKey(currentDN, baseDN.size());
            ByteStringBuilder suffix = beforeKey(currentDNKey);
            ByteStringBuilder end = afterKey(currentDNKey);

            Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName());
            try
            {
              // Step forward until we pass the ending value.
              boolean success = cursor.positionToKeyOrNext(suffix);
              while (success && cursor.getKey().compareTo(end) < 0)
              {
                // We have found a subordinate entry.
                EntryID oldID = new EntryID(cursor.getValue());
                Entry oldEntry = id2entry.get(txn, oldID);

                // Construct the new DN of the entry.
                DN newDN = modDN(oldEntry.getName(), currentDN.size(), entry.getName());

                // Assign a new entry ID if we are renumbering.
                EntryID newID = oldID;
                if (!newApexID.equals(oldApexID))
                {
                  newID = rootContainer.getNextEntryID();

                  if (logger.isTraceEnabled())
                  {
                    logger.trace("Move of subordinate entry requires renumbering. "
                        + "Old DN: %s New DN: %s Old entry ID: %d New entry ID: %d",
                        oldEntry.getName(), newDN, oldID, newID);
                  }
                }

                // Move this entry.
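                // removeSubordinateEntry() also runs any subordinate modify DN plugins and appends the entry
                // to the MovedEntry list so it can be re-added by addRenamedEntry() once the whole subtree
                // has been removed.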
                removeSubordinateEntry(txn, buffer, oldID, newID, oldEntry, newDN, modifyDNOperation, current);
                current = current.next;

                if (modifyDNOperation != null)
                {
                  modifyDNOperation.checkIfCanceled(false);
                }

                // Get the next DN.
                success = cursor.next();
              }
            }
            finally
            {
              cursor.close();
            }

            // Set current to the first moved entry and null out the head.
            // This will allow processed moved entries to be GCed.
            current = head.next;
            head = null;
            while (current != null)
            {
              addRenamedEntry(txn, buffer, current.entryID, current.entry, isApexEntryMoved, current.renumbered,
                  modifyDNOperation);
              current = current.next;
            }
            buffer.flush(txn);

            if (modifyDNOperation != null)
            {
              // One last check before committing
              modifyDNOperation.checkIfCanceled(true);
            }
          }
          catch (StorageRuntimeException | DirectoryException | CanceledOperationException e)
          {
            throw e;
          }
          catch (Exception e)
          {
            String msg = e.getMessage();
            if (msg == null)
            {
              msg = stackTraceToSingleLineString(e);
            }
            throw new DirectoryException(
                DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e);
          }
        }
      });
    }
    catch (Exception e)
    {
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }
  }

  /** Represents a renamed entry that has been deleted but not yet added back. */
  private static final class MovedEntry
  {
    private EntryID entryID;
    private Entry entry;
    private MovedEntry next;
    private boolean renumbered;

    private MovedEntry(EntryID entryID, Entry entry, boolean renumbered)
    {
      this.entryID = entryID;
      this.entry = entry;
      this.renumbered = renumbered;
    }
  }

  private void addRenamedEntry(WriteableTransaction txn, IndexBuffer buffer,
      EntryID newID,
      Entry newEntry,
      boolean isApexEntryMoved,
      boolean renumbered,
      ModifyDNOperation modifyDNOperation)
      throws DirectoryException, StorageRuntimeException
  {
    // FIXME: the core server should validate that the new subtree location is empty.
    dn2id.put(txn, newEntry.getName(), newID);
    id2entry.put(txn, newID, newEntry);
    dn2uri.addEntry(txn, newEntry);

    if (renumbered || modifyDNOperation == null)
    {
      // Reindex the entry with the new ID.
      indexInsertEntry(buffer, newEntry, newID);
    }

    if (isApexEntryMoved)
    {
      final DN parentDN = getParentWithinBase(newEntry.getName());
      if (parentDN != null)
      {
        id2childrenCount.addDelta(txn, dn2id.get(txn, parentDN), 1);
      }
    }
  }

  private void removeApexEntry(WriteableTransaction txn, IndexBuffer buffer,
      DN oldSuperiorDN,
      EntryID oldID, EntryID newID,
      Entry oldEntry, Entry newEntry,
      boolean isApexEntryMoved,
      ModifyDNOperation modifyDNOperation,
      MovedEntry tail)
      throws DirectoryException, StorageRuntimeException
  {
    DN oldDN = oldEntry.getName();

    // Remove the old DN from dn2id.
    dn2id.remove(txn, oldDN);

    // When renumbering, remove the old ID from id2entry; the new entry
    // (old entry with new DN) is put back later by addRenamedEntry().
    if (!newID.equals(oldID))
    {
      id2entry.remove(txn, oldID);
    }

    // Update any referral records.
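    // Referral records for the renamed entry are re-added later by addRenamedEntry().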
    dn2uri.deleteEntry(txn, oldEntry);

    tail.next = new MovedEntry(newID, newEntry, !newID.equals(oldID));

    if (oldSuperiorDN != null && isApexEntryMoved)
    {
      // Since the entry has moved, oldSuperiorDN has lost a child.
      id2childrenCount.addDelta(txn, dn2id.get(txn, oldSuperiorDN), -1);
    }

    if (!newID.equals(oldID))
    {
      id2childrenCount.addDelta(txn, newID, id2childrenCount.deleteCount(txn, oldID));
    }

    if (!newID.equals(oldID) || modifyDNOperation == null)
    {
      // Reindex the entry with the new ID.
      indexRemoveEntry(buffer, oldEntry, oldID);
    }
    else
    {
      // Update the indexes if needed.
      indexModifications(buffer, oldEntry, newEntry, oldID,
          modifyDNOperation.getModifications());
    }

    // Remove the entry from the entry cache.
    EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      entryCache.removeEntry(oldDN);
    }
  }

  private void removeSubordinateEntry(WriteableTransaction txn, IndexBuffer buffer,
      EntryID oldID, EntryID newID,
      Entry oldEntry, DN newDN,
      ModifyDNOperation modifyDNOperation,
      MovedEntry tail)
      throws DirectoryException, StorageRuntimeException
  {
    DN oldDN = oldEntry.getName();
    Entry newEntry = oldEntry.duplicate(false);
    newEntry.setDN(newDN);
    List<Modification> modifications =
        Collections.unmodifiableList(new ArrayList<Modification>(0));

    // Create a new entry that is a copy of the old entry but with the new DN.
    // Also invoke any subordinate modify DN plugins on the entry.
    // FIXME -- At the present time, we don't support subordinate modify DN
    //          plugins that make changes to subordinate entries and therefore
    //          provide an unmodifiable list for the modifications element.
    // FIXME -- This will need to be updated appropriately if we decide that
    //          these plugins should be invoked for synchronization
    //          operations.
    if (modifyDNOperation != null && !modifyDNOperation.isSynchronizationOperation())
    {
      SubordinateModifyDN pluginResult =
          getPluginConfigManager().invokeSubordinateModifyDNPlugins(
              modifyDNOperation, oldEntry, newEntry, modifications);

      if (!pluginResult.continueProcessing())
      {
        throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
            ERR_MODIFYDN_ABORTED_BY_SUBORDINATE_PLUGIN.get(oldDN, newDN));
      }

      if (!modifications.isEmpty())
      {
        LocalizableMessageBuilder invalidReason = new LocalizableMessageBuilder();
        if (!newEntry.conformsToSchema(null, false, false, false,
            invalidReason))
        {
          throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
              ERR_MODIFYDN_ABORTED_BY_SUBORDINATE_SCHEMA_ERROR.get(oldDN, newDN, invalidReason));
        }
      }
    }

    // Remove the old DN from dn2id.
    dn2id.remove(txn, oldDN);

    // When renumbering, remove the old ID from id2entry; the new entry
    // (old entry with new DN) is put back later by addRenamedEntry().
    if (!newID.equals(oldID))
    {
      id2entry.remove(txn, oldID);
    }

    // Update any referral records.
    dn2uri.deleteEntry(txn, oldEntry);

    tail.next = new MovedEntry(newID, newEntry, !newID.equals(oldID));

    if (!newID.equals(oldID))
    {
      id2childrenCount.deleteCount(txn, oldID);

      // Reindex the entry with the new ID.
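      // The entry is only removed from the indexes here; it is re-inserted under the new ID later
      // by addRenamedEntry().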
      indexRemoveEntry(buffer, oldEntry, oldID);
    }
    else if (!modifications.isEmpty())
    {
      // Update the indexes.
      indexModifications(buffer, oldEntry, newEntry, oldID, modifications);
    }

    // Remove the entry from the entry cache.
    EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      entryCache.removeEntry(oldDN);
    }
  }

  /**
   * Make a new DN for a subordinate entry of a renamed or moved entry.
   *
   * @param oldDN The current DN of the subordinate entry.
   * @param oldSuffixLen The number of RDN components in the current DN of the renamed or moved entry.
   * @param newSuffixDN The new DN of the renamed or moved entry.
   * @return The new DN of the subordinate entry.
   */
  static DN modDN(DN oldDN, int oldSuffixLen, DN newSuffixDN)
  {
    int oldDNNumComponents = oldDN.size();
    int oldDNKeepComponents = oldDNNumComponents - oldSuffixLen;
    int newSuffixDNComponents = newSuffixDN.size();

    RDN[] newDNComponents = new RDN[oldDNKeepComponents + newSuffixDNComponents];
    for (int i = 0; i < oldDNKeepComponents; i++)
    {
      newDNComponents[i] = oldDN.getRDN(i);
    }

    for (int i = oldDNKeepComponents, j = 0; j < newSuffixDNComponents; i++, j++)
    {
      newDNComponents[i] = newSuffixDN.getRDN(j);
    }

    return new DN(newDNComponents);
  }

  /**
   * Insert a new entry into the attribute indexes.
   *
   * @param buffer The index buffer used to buffer up the index changes.
   * @param entry The entry to be inserted into the indexes.
   * @param entryID The ID of the entry to be inserted into the indexes.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws DirectoryException If a Directory Server error occurs.
   */
  private void indexInsertEntry(IndexBuffer buffer, Entry entry, EntryID entryID)
      throws StorageRuntimeException, DirectoryException
  {
    for (AttributeIndex index : attrIndexMap.values())
    {
      index.addEntry(buffer, entryID, entry);
    }

    for (VLVIndex vlvIndex : vlvIndexMap.values())
    {
      vlvIndex.addEntry(buffer, entryID, entry);
    }
  }

  /**
   * Remove an entry from the attribute indexes.
   *
   * @param buffer The index buffer used to buffer up the index changes.
   * @param entry The entry to be removed from the indexes.
   * @param entryID The ID of the entry to be removed from the indexes.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws DirectoryException If a Directory Server error occurs.
   */
  private void indexRemoveEntry(IndexBuffer buffer, Entry entry, EntryID entryID)
      throws StorageRuntimeException, DirectoryException
  {
    for (AttributeIndex index : attrIndexMap.values())
    {
      index.removeEntry(buffer, entryID, entry);
    }

    for (VLVIndex vlvIndex : vlvIndexMap.values())
    {
      vlvIndex.removeEntry(buffer, entryID, entry);
    }
  }

  /**
   * Update the attribute indexes to reflect the changes to the
   * attributes of an entry resulting from a sequence of modifications.
   *
   * @param buffer The index buffer used to buffer up the index changes.
   * @param oldEntry The contents of the entry before the change.
   * @param newEntry The contents of the entry after the change.
   * @param entryID The ID of the entry that was changed.
   * @param mods The sequence of modifications made to the entry.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws DirectoryException If a Directory Server error occurs.
   */
  private void indexModifications(IndexBuffer buffer, Entry oldEntry, Entry newEntry,
      EntryID entryID, List<Modification> mods)
      throws StorageRuntimeException, DirectoryException
  {
    // Process in index configuration order.
    for (AttributeIndex index : attrIndexMap.values())
    {
      // Check whether any modifications apply to this indexed attribute.
      if (isAttributeModified(index, mods))
      {
        index.modifyEntry(buffer, entryID, oldEntry, newEntry);
      }
    }

    for (VLVIndex vlvIndex : vlvIndexMap.values())
    {
      vlvIndex.modifyEntry(buffer, entryID, oldEntry, newEntry, mods);
    }
  }

  /**
   * Get a count of the number of entries stored in this entry container including the baseDN.
   *
   * @return The number of entries stored in this entry container including the baseDN.
   * @throws StorageRuntimeException
   *           If an error occurs in the storage.
   */
  long getNumberOfEntriesInBaseDN() throws StorageRuntimeException
  {
    try
    {
      return storage.read(new ReadOperation<Long>()
      {
        @Override
        public Long run(ReadableTransaction txn) throws Exception
        {
          return getNumberOfEntriesInBaseDN0(txn);
        }
      });
    }
    catch (Exception e)
    {
      throw new StorageRuntimeException(e);
    }
  }

  long getNumberOfEntriesInBaseDN0(ReadableTransaction txn)
  {
    final int baseDnIfExists = dn2id.get(txn, baseDN) != null ? 1 : 0;
    return id2childrenCount.getTotalCount(txn) + baseDnIfExists;
  }

  /**
   * Determine whether the provided operation has the ManageDsaIT request
   * control.
   * @param operation The operation for which the determination is to be made.
   * @return true if the operation has the ManageDsaIT request control, or false
   *         if not.
   */
  private static boolean isManageDsaITOperation(Operation operation)
  {
    if (operation != null)
    {
      List<Control> controls = operation.getRequestControls();
      if (controls != null)
      {
        for (Control control : controls)
        {
          if (ServerConstants.OID_MANAGE_DSAIT_CONTROL.equals(control.getOID()))
          {
            return true;
          }
        }
      }
    }
    return false;
  }

  /**
   * Delete this entry container from disk. The entry container should be
   * closed before calling this method.
   *
   * @param txn a non null transaction
   * @throws StorageRuntimeException If an error occurs while removing the entry
   *                                 container.
   */
  void delete(WriteableTransaction txn) throws StorageRuntimeException
  {
    for (Tree tree : listTrees())
    {
      tree.delete(txn);
    }
  }

  /**
   * Remove a tree from disk.
   *
   * @param txn a non null transaction
   * @param tree The tree container to remove.
   * @throws StorageRuntimeException If an error occurs while attempting to delete the tree.
   */
  void deleteTree(WriteableTransaction txn, Tree tree) throws StorageRuntimeException
  {
    if (tree == state)
    {
      // The state tree cannot be removed individually.
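      // It stores per-index state (such as the trusted flag) for the remaining trees.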
      return;
    }
    tree.delete(txn);
    if (tree instanceof Index)
    {
      state.deleteRecord(txn, tree.getName());
    }
  }

  /**
   * Returns the tree prefix (container name) used by this entry container.
   * The prefix is constructed from the base DN: only alphanumeric characters
   * are preserved, all other characters are replaced with an underscore.
   *
   * @return The container name for the base DN.
   */
  String getTreePrefix()
  {
    return treePrefix;
  }

  /**
   * Sets a new tree prefix for this entry container and renames all
   * existing trees in use by this entry container.
   *
   * @param txn the transaction for renaming Trees
   * @param newBaseDN The new tree prefix to use.
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  void setTreePrefix(WriteableTransaction txn, final String newBaseDN) throws StorageRuntimeException
  {
    final List<Tree> allTrees = listTrees();
    try
    {
      // Rename in transaction.
      for (Tree tree : allTrees)
      {
        TreeName oldName = tree.getName();
        TreeName newName = oldName.replaceBaseDN(newBaseDN);
        txn.renameTree(oldName, newName);
      }
      // Only rename the containers if the txn succeeded.
      for (Tree tree : allTrees)
      {
        TreeName oldName = tree.getName();
        TreeName newName = oldName.replaceBaseDN(newBaseDN);
        tree.setName(newName);
      }
    }
    catch (Exception e)
    {
      String msg = e.getMessage();
      if (msg == null)
      {
        msg = stackTraceToSingleLineString(e);
      }
      throw new StorageRuntimeException(ERR_UNCHECKED_EXCEPTION.get(msg).toString(), e);
    }

    try
    {
      for (Tree tree : allTrees)
      {
        tree.open(txn, false);
      }
    }
    catch (Exception e)
    {
      String msg = e.getMessage();
      if (msg == null)
      {
        msg = stackTraceToSingleLineString(e);
      }
      throw new StorageRuntimeException(ERR_UNCHECKED_EXCEPTION.get(msg).toString(), e);
    }
  }

  @Override
  public DN getBaseDN()
  {
    return baseDN;
  }

  /**
   * Get the parent of a DN in the scope of the base DN.
   *
   * @param dn A DN which is in the scope of the base DN.
   * @return The parent DN, or null if the given DN is the base DN.
   */
  DN getParentWithinBase(DN dn)
  {
    if (dn.equals(baseDN))
    {
      return null;
    }
    return dn.parent();
  }

  @Override
  public boolean isConfigurationChangeAcceptable(
      PluggableBackendCfg cfg, List<LocalizableMessage> unacceptableReasons)
  {
    // This is always true because all config attributes used
    // by the entry container are validated by the admin framework.
    return true;
  }

  @Override
  public ConfigChangeResult applyConfigurationChange(final PluggableBackendCfg cfg)
  {
    final ConfigChangeResult ccr = new ConfigChangeResult();

    exclusiveLock.lock();
    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          DataConfig entryDataConfig = new DataConfig(cfg.isEntriesCompressed(),
              cfg.isCompactEncoding(), rootContainer.getCompressedSchema());
          id2entry.setDataConfig(entryDataConfig);

          EntryContainer.this.config = cfg;
        }
      });
    }
    catch (Exception e)
    {
      ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
      ccr.addMessage(LocalizableMessage.raw(stackTraceToSingleLineString(e)));
    }
    finally
    {
      exclusiveLock.unlock();
    }

    return ccr;
  }

  /**
   * Clear the contents of this entry container.
   *
   * @throws StorageRuntimeException If an error occurs while removing the entry
   *                                 container.
   */
  public void clear() throws StorageRuntimeException
  {
    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          clear0(txn);
        }
      });
    }
    catch (Exception e)
    {
      throw new StorageRuntimeException(e);
    }
  }

  private void clear0(WriteableTransaction txn) throws StorageRuntimeException
  {
    final List<Tree> allTrees = listTrees();
    try
    {
      for (Tree tree : allTrees)
      {
        tree.delete(txn);
      }
    }
    finally
    {
      for (Tree tree : allTrees)
      {
        tree.open(txn, true);
      }

      for (Tree tree : allTrees)
      {
        if (tree instanceof Index)
        {
          ((Index) tree).setTrusted(txn, true);
        }
      }
    }
  }

  List<Tree> listTrees()
  {
    final List<Tree> allTrees = new ArrayList<>();
    allTrees.add(dn2id);
    allTrees.add(id2entry);
    allTrees.add(dn2uri);
    allTrees.add(id2childrenCount);
    allTrees.add(state);

    for (AttributeIndex index : attrIndexMap.values())
    {
      allTrees.addAll(index.getNameToIndexes().values());
    }

    allTrees.addAll(vlvIndexMap.values());
    return allTrees;
  }

  /**
   * Clear the contents for a tree from disk.
   *
   * @param txn a non null transaction
   * @param tree The tree to clear.
   * @throws StorageRuntimeException if a storage error occurs.
   */
  void clearTree(WriteableTransaction txn, Tree tree) throws StorageRuntimeException
  {
    try
    {
      tree.delete(txn);
    }
    finally
    {
      tree.open(txn, true);
    }
    if (logger.isTraceEnabled())
    {
      logger.trace("Cleared the tree %s", tree.getName());
    }
  }

  /**
   * Finds an existing entry whose DN is the closest ancestor of a given baseDN.
   *
   * @param txn a non null transaction
   * @param baseDN the DN for which we are searching a matched DN.
   * @return the DN of the closest ancestor of the baseDN.
   * @throws DirectoryException If an error prevented the check of an
   *                            existing entry from being performed.
2845 */ 2846 private DN getMatchedDN(ReadableTransaction txn, DN baseDN) throws DirectoryException 2847 { 2848 DN parentDN = baseDN.getParentDNInSuffix(); 2849 while (parentDN != null && parentDN.isDescendantOf(getBaseDN())) 2850 { 2851 if (entryExists(txn, parentDN)) 2852 { 2853 return parentDN; 2854 } 2855 parentDN = parentDN.getParentDNInSuffix(); 2856 } 2857 return null; 2858 } 2859 2860 /** 2861 * Checks if any modifications apply to this indexed attribute. 2862 * @param index the indexed attributes. 2863 * @param mods the modifications to check for. 2864 * @return true if any apply, false otherwise. 2865 */ 2866 private static boolean isAttributeModified(AttributeIndex index, List<Modification> mods) 2867 { 2868 AttributeType indexAttributeType = index.getAttributeType(); 2869 List<AttributeType> subTypes = 2870 DirectoryServer.getSchema().getSubTypes(indexAttributeType); 2871 2872 for (Modification mod : mods) 2873 { 2874 Attribute modAttr = mod.getAttribute(); 2875 AttributeType modAttrType = modAttr.getAttributeType(); 2876 if (modAttrType.equals(indexAttributeType) 2877 || subTypes.contains(modAttrType)) 2878 { 2879 return true; 2880 } 2881 } 2882 return false; 2883 } 2884 2885 2886 /** 2887 * Fetch the base Entry of the EntryContainer. 2888 * @param baseDN the DN for the base entry 2889 * @param searchScope the scope under which this is fetched. 2890 * Scope is used for referral processing. 2891 * @return the Entry matching the baseDN. 2892 * @throws DirectoryException if the baseDN doesn't exist. 2893 */ 2894 private Entry fetchBaseEntry(ReadableTransaction txn, DN baseDN, SearchScope searchScope) 2895 throws DirectoryException 2896 { 2897 Entry baseEntry = null; 2898 try 2899 { 2900 baseEntry = getEntry0(txn, baseDN); 2901 } 2902 catch (Exception e) 2903 { 2904 logger.traceException(e); 2905 } 2906 2907 // The base entry must exist for a successful result. 2908 if (baseEntry == null) 2909 { 2910 // Check for referral entries above the base entry. 2911 dn2uri.targetEntryReferrals(txn, baseDN, searchScope); 2912 2913 throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, 2914 ERR_SEARCH_NO_SUCH_OBJECT.get(baseDN), getMatchedDN(txn, baseDN), null); 2915 } 2916 2917 return baseEntry; 2918 } 2919 2920 private EntryIDSet sort(ReadableTransaction txn, EntryIDSet entryIDSet, SearchOperation searchOperation, 2921 SortOrder sortOrder, VLVRequestControl vlvRequest) throws DirectoryException 2922 { 2923 if (!entryIDSet.isDefined()) 2924 { 2925 return newUndefinedSet(); 2926 } 2927 2928 final DN baseDN = searchOperation.getBaseDN(); 2929 final SearchScope scope = searchOperation.getScope(); 2930 final SearchFilter filter = searchOperation.getFilter(); 2931 2932 final TreeMap<ByteString, EntryID> sortMap = new TreeMap<>(); 2933 for (EntryID id : entryIDSet) 2934 { 2935 try 2936 { 2937 Entry e = getEntry(txn, id); 2938 if (e.matchesBaseAndScope(baseDN, scope) && filter.matchesEntry(e)) 2939 { 2940 sortMap.put(encodeVLVKey(sortOrder, e, id.longValue()), id); 2941 } 2942 } 2943 catch (Exception e) 2944 { 2945 LocalizableMessage message = ERR_ENTRYIDSORTER_CANNOT_EXAMINE_ENTRY.get(id, getExceptionMessage(e)); 2946 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); 2947 } 2948 } 2949 2950 // See if there is a VLV request to further pare down the set of results, and if there is where it should be 2951 // processed by offset or assertion value. 
    if (vlvRequest == null)
    {
      return newDefinedSet(toArray(sortMap.values()));
    }

    if (vlvRequest.getTargetType() == VLVRequestControl.TYPE_TARGET_BYOFFSET)
    {
      return sortByOffset(searchOperation, vlvRequest, sortMap);
    }
    return sortByGreaterThanOrEqualAssertion(searchOperation, vlvRequest, sortOrder, sortMap);
  }

  private static final long[] toArray(Collection<EntryID> entryIDs)
  {
    final long[] array = new long[entryIDs.size()];
    int i = 0;
    for (EntryID entryID : entryIDs)
    {
      array[i++] = entryID.longValue();
    }
    return array;
  }

  private static final EntryIDSet sortByGreaterThanOrEqualAssertion(SearchOperation searchOperation,
      VLVRequestControl vlvRequest, SortOrder sortOrder, final TreeMap<ByteString, EntryID> sortMap)
      throws DirectoryException
  {
    ByteString assertionValue = vlvRequest.getGreaterThanOrEqualAssertion();
    ByteSequence encodedTargetAssertion =
        encodeTargetAssertion(sortOrder, assertionValue, searchOperation, sortMap.size());

    boolean targetFound = false;
    int index = 0;
    int targetIndex = 0;
    int startIndex = 0;
    int includedAfterCount = 0;
    long[] idSet = new long[sortMap.size()];
    for (Map.Entry<ByteString, EntryID> entry : sortMap.entrySet())
    {
      ByteString vlvKey = entry.getKey();
      EntryID id = entry.getValue();
      idSet[index++] = id.longValue();

      if (targetFound)
      {
        includedAfterCount++;
        if (includedAfterCount >= vlvRequest.getAfterCount())
        {
          break;
        }
      }
      else
      {
        targetFound = vlvKey.compareTo(encodedTargetAssertion) >= 0;
        if (targetFound)
        {
          startIndex = Math.max(0, targetIndex - vlvRequest.getBeforeCount());
        }
        targetIndex++;
      }
    }

    final EntryIDSet result;
    if (targetFound)
    {
      final long[] array = new long[index - startIndex];
      System.arraycopy(idSet, startIndex, array, 0, array.length);
      result = newDefinedSet(array);
    }
    else
    {
      /*
       * No entry was found to be greater than or equal to the sort key, so the target offset will
       * be one greater than the content count.
       */
      targetIndex = sortMap.size() + 1;
      result = newDefinedSet();
    }
    searchOperation.addResponseControl(new VLVResponseControl(targetIndex, sortMap.size(), LDAPResultCode.SUCCESS));
    return result;
  }

  private static final EntryIDSet sortByOffset(SearchOperation searchOperation, VLVRequestControl vlvRequest,
      TreeMap<ByteString, EntryID> sortMap) throws DirectoryException
  {
    int targetOffset = vlvRequest.getOffset();
    if (targetOffset < 0)
    {
      // The client specified a negative target offset. This
      // should never be allowed.
      searchOperation.addResponseControl(new VLVResponseControl(targetOffset, sortMap.size(),
          LDAPResultCode.OFFSET_RANGE_ERROR));

      LocalizableMessage message = ERR_ENTRYIDSORTER_NEGATIVE_START_POS.get();
      throw new DirectoryException(ResultCode.VIRTUAL_LIST_VIEW_ERROR, message);
    }

    // An offset of 0 is an easy mistake to make, since VLV offsets start at 1 instead of 0.
    // We'll assume the client meant to use 1.
    targetOffset = (targetOffset == 0) ? 1 : targetOffset;

    int beforeCount = vlvRequest.getBeforeCount();
    int afterCount = vlvRequest.getAfterCount();
    int listOffset = targetOffset - 1; // VLV offsets start at 1, not 0.
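    // For example: targetOffset=10, beforeCount=3, afterCount=5 gives listOffset=9, startPos=6
    // and count=9, so at most the entries at (1-based) list positions 7..15 are returned.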
    int startPos = listOffset - beforeCount;
    if (startPos < 0)
    {
      // This can happen if beforeCount >= offset, and in this case we'll just adjust the start position to ignore
      // the range of beforeCount that doesn't exist.
      startPos = 0;
      beforeCount = listOffset;
    }
    else if (startPos >= sortMap.size())
    {
      // The start position is beyond the end of the list. In this case, we'll assume that the start position was
      // one greater than the size of the list and will only return the beforeCount entries.
      targetOffset = sortMap.size() + 1;
      listOffset = sortMap.size();
      startPos = listOffset - beforeCount;
      afterCount = 0;
    }

    int count = 1 + beforeCount + afterCount;
    long[] sortedIDs = new long[count];
    int treePos = 0;
    int arrayPos = 0;
    for (EntryID id : sortMap.values())
    {
      if (treePos++ < startPos)
      {
        continue;
      }

      sortedIDs[arrayPos++] = id.longValue();
      if (arrayPos >= count)
      {
        break;
      }
    }

    if (arrayPos < count)
    {
      // We don't have enough entries in the set to meet the requested page size, so we'll need to shorten the
      // array.
      sortedIDs = Arrays.copyOf(sortedIDs, arrayPos);
    }

    searchOperation.addResponseControl(new VLVResponseControl(targetOffset, sortMap.size(), LDAPResultCode.SUCCESS));
    return newDefinedSet(sortedIDs);
  }

  /** Acquire the exclusive lock. */
  void lock()
  {
    exclusiveLock.lock();
  }

  /** Release the exclusive lock. */
  void unlock()
  {
    exclusiveLock.unlock();
  }

  @Override
  public String toString()
  {
    return treePrefix;
  }
}