Coverage report

org.apache.jcs.auxiliary.disk.indexed.IndexedDiskCache$ShutdownHook
  %line: 100%    %branch: 100%

 1  
 package org.apache.jcs.auxiliary.disk.indexed;
 2  
 
 3  
 /*
 4  
  * Licensed to the Apache Software Foundation (ASF) under one
 5  
  * or more contributor license agreements.  See the NOTICE file
 6  
  * distributed with this work for additional information
 7  
  * regarding copyright ownership.  The ASF licenses this file
 8  
  * to you under the Apache License, Version 2.0 (the
 9  
  * "License"); you may not use this file except in compliance
 10  
  * with the License.  You may obtain a copy of the License at
 11  
  *
 12  
  *   http://www.apache.org/licenses/LICENSE-2.0
 13  
  *
 14  
  * Unless required by applicable law or agreed to in writing,
 15  
  * software distributed under the License is distributed on an
 16  
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 17  
  * KIND, either express or implied.  See the License for the
 18  
  * specific language governing permissions and limitations
 19  
  * under the License.
 20  
  */
 21  
 
 22  
 import java.io.File;
 23  
 import java.io.IOException;
 24  
 import java.io.Serializable;
 25  
 import java.util.ArrayList;
 26  
 import java.util.Arrays;
 27  
 import java.util.Comparator;
 28  
 import java.util.ConcurrentModificationException;
 29  
 import java.util.HashMap;
 30  
 import java.util.HashSet;
 31  
 import java.util.Iterator;
 32  
 import java.util.LinkedList;
 33  
 import java.util.List;
 34  
 import java.util.Map;
 35  
 import java.util.Set;
 36  
 
 37  
 import org.apache.commons.logging.Log;
 38  
 import org.apache.commons.logging.LogFactory;
 39  
 import org.apache.jcs.auxiliary.AuxiliaryCacheAttributes;
 40  
 import org.apache.jcs.auxiliary.disk.AbstractDiskCache;
 41  
 import org.apache.jcs.auxiliary.disk.LRUMapJCS;
 42  
 import org.apache.jcs.engine.CacheConstants;
 43  
 import org.apache.jcs.engine.behavior.ICacheElement;
 44  
 import org.apache.jcs.engine.control.group.GroupAttrName;
 45  
 import org.apache.jcs.engine.control.group.GroupId;
 46  
 import org.apache.jcs.engine.stats.StatElement;
 47  
 import org.apache.jcs.engine.stats.Stats;
 48  
 import org.apache.jcs.engine.stats.behavior.IStatElement;
 49  
 import org.apache.jcs.engine.stats.behavior.IStats;
 50  
 import org.apache.jcs.utils.struct.SortedPreferentialArray;
 51  
 import org.apache.jcs.utils.timing.ElapsedTimer;
 52  
 
 53  
 import EDU.oswego.cs.dl.util.concurrent.ReentrantWriterPreferenceReadWriteLock;
 54  
 
 55  
 /**
 56  
  * Disk cache that uses a RandomAccessFile with keys stored in memory. The maximum number of keys
 57  
  * stored in memory is configurable. The disk cache tries to recycle spots on disk to limit file
 58  
  * expansion.
 59  
  */
 60  
 public class IndexedDiskCache
 61  
     extends AbstractDiskCache
 62  
 {
 63  
     /** Don't change */
 64  
     private static final long serialVersionUID = -265035607729729629L;
 65  
 
 66  
     /** The logger */
 67  
     private static final Log log = LogFactory.getLog( IndexedDiskCache.class );
 68  
 
 69  
     private final String logCacheName;
 70  
 
 71  
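    /** The base name used for the data and key files (without extension). */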
     private String fileName;
 72  
 
 73  
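    /** Handles the data file, which holds the serialized cache elements. */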
     private IndexedDisk dataFile;
 74  
 
 75  
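    /** Handles the key file, which holds the persisted key map. */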
     private IndexedDisk keyFile;
 76  
 
 77  
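    /** Maps keys to IndexedDiskElementDescriptors (position and length in the data file). */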
     private Map keyHash;
 78  
 
 79  
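    /** The maximum number of keys to keep in memory. */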
     private int maxKeySize;
 80  
 
 81  
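    /** The directory in which the cache files are kept. */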
     private File rafDir;
 82  
 
 83  
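    /** Should we reuse empty spots on disk from the recycle bin? */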
     boolean doRecycle = true;
 84  
 
 85  
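    /** Should the file be optimized in real time, based on the remove count? */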
     boolean isRealTimeOptimizationEnabled = true;
 86  
 
 87  
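    /** Should the file be optimized on shutdown? */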
     boolean isShutdownOptimizationEnabled = true;
 88  
 
 89  
    /** Are we currently optimizing the files? */
 90  
     boolean isOptimizing = false;
 91  
 
 92  
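    /** The number of times the file has been optimized. */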
     private int timesOptimized = 0;
 93  
 
 94  
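    /** The thread currently running an optimization, if any. */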
     private volatile Thread currentOptimizationThread;
 95  
 
 96  
    /** Used for counting the number of removes; drives real-time optimization. */
 97  
     private int removeCount = 0;
 98  
 
 99  
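    /** When true, puts are added to the queued put list while an optimization is in progress. */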
     private boolean queueInput = false;
 100  
 
 101  
    /** List where puts made during optimization are queued. */
 102  
     private LinkedList queuedPutList = new LinkedList();
 103  
 
 104  
    /** RECYCLE BIN -- array of empty spots */
 105  
     private SortedPreferentialArray recycle;
 106  
 
 107  
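    /** The configuration attributes for this cache. */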
     private IndexedDiskCacheAttributes cattr;
 108  
 
 109  
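    /** The number of times a spot from the recycle bin has been reused. */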
     private int recycleCnt = 0;
 110  
 
 111  
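    /** The number of keys loaded at startup. */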
     private int startupSize = 0;
 112  
 
 113  
     /** the number of bytes free on disk. */
 114  
     private long bytesFree = 0;
 115  
 
 116  
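    /** The number of hits against the disk cache. */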
     private int hitCount = 0;
 117  
 
 118  
     /**
 119  
     * Use this lock to synchronize reads and writes to the underlying storage mechanism.
 120  
      */
 121  
     protected ReentrantWriterPreferenceReadWriteLock storageLock = new ReentrantWriterPreferenceReadWriteLock();
 122  
 
 123  
     /**
 124  
      * Constructor for the DiskCache object.
 125  
      * <p>
 126  
      * @param cattr
 127  
      */
 128  
     public IndexedDiskCache( IndexedDiskCacheAttributes cattr )
 129  
     {
 130  
         super( cattr );
 131  
 
 132  
         String rootDirName = cattr.getDiskPath();
 133  
         this.maxKeySize = cattr.getMaxKeySize();
 134  
 
 135  
         this.isRealTimeOptimizationEnabled = cattr.getOptimizeAtRemoveCount() > 0;
 136  
         this.isShutdownOptimizationEnabled = cattr.isOptimizeOnShutdown();
 137  
 
 138  
         this.cattr = cattr;
 139  
 
 140  
         this.logCacheName = "Region [" + getCacheName() + "] ";
 141  
         this.fileName = getCacheName();
 142  
 
 143  
         this.rafDir = new File( rootDirName );
 144  
         this.rafDir.mkdirs();
 145  
 
 146  
         if ( log.isInfoEnabled() )
 147  
         {
 148  
             log.info( logCacheName + "Cache file root directory: " + rootDirName );
 149  
         }
 150  
 
 151  
         try
 152  
         {
 153  
            this.dataFile = new IndexedDisk( new File( rafDir, fileName + ".data" ) );
 154  
 
 155  
            this.keyFile = new IndexedDisk( new File( rafDir, fileName + ".key" ) );
 156  
 
 157  
             // If the key file has contents, try to initialize the keys
 158  
            // from it. If no keys are loaded, reset the data file.
 159  
 
 160  
             if ( keyFile.length() > 0 )
 161  
             {
 162  
                 loadKeys();
 163  
 
 164  
                 if ( keyHash.size() == 0 )
 165  
                 {
 166  
                     dataFile.reset();
 167  
                 }
 168  
                 else
 169  
                 {
 170  
                     boolean isOk = checkKeyDataConsistency( false );
 171  
                     if ( !isOk )
 172  
                     {
 173  
                         keyHash.clear();
 174  
                         keyFile.reset();
 175  
                         dataFile.reset();
 176  
                         log.warn( logCacheName + "Corruption detected.  Reset data and keys files." );
 177  
                     }
 178  
                     else
 179  
                     {
 180  
                         startupSize = keyHash.size();
 181  
                     }
 182  
                 }
 183  
             }
 184  
 
 185  
             // Otherwise start with a new empty map for the keys, and reset
 186  
             // the data file if it has contents.
 187  
 
 188  
             else
 189  
             {
 190  
                 initKeyMap();
 191  
 
 192  
                 if ( dataFile.length() > 0 )
 193  
                 {
 194  
                     dataFile.reset();
 195  
                 }
 196  
             }
 197  
 
 198  
             // create the recyclebin
 199  
             initRecycleBin();
 200  
 
 201  
             // Initialization finished successfully, so set alive to true.
 202  
             alive = true;
 203  
             if ( log.isInfoEnabled() )
 204  
             {
 205  
                 log.info( logCacheName + "Indexed Disk Cache is alive." );
 206  
             }
 207  
         }
 208  
         catch ( Exception e )
 209  
         {
 210  
             log.error( logCacheName + "Failure initializing for fileName: " + fileName + " and root directory: "
 211  
                 + rootDirName, e );
 212  
         }
 213  
 
 214  
         // TODO: Should we improve detection of whether or not the file should be optimized.
 215  
         if ( isRealTimeOptimizationEnabled && keyHash.size() > 0 )
 216  
         {
 217  
             // Kick off a real time optimization, in case we didn't do a final optimization.
 218  
             doOptimizeRealTime();
 219  
         }
 220  
         ShutdownHook shutdownHook = new ShutdownHook();
 221  
         Runtime.getRuntime().addShutdownHook( shutdownHook );
 222  
     }
 223  
 
 224  
     /**
 225  
      * Loads the keys from the .key file. The keys are stored in a HashMap on disk. This is
 226  
     * converted into an LRUMap.
 227  
      * <p>
 228  
      * @throws InterruptedException
 229  
      */
 230  
     protected void loadKeys()
 231  
         throws InterruptedException
 232  
     {
 233  
         storageLock.writeLock().acquire();
 234  
 
 235  
         if ( log.isDebugEnabled() )
 236  
         {
 237  
             log.debug( logCacheName + "Loading keys for " + keyFile.toString() );
 238  
         }
 239  
 
 240  
         try
 241  
         {
 242  
             // create a key map to use.
 243  
             initKeyMap();
 244  
 
 245  
             HashMap keys = (HashMap) keyFile.readObject( new IndexedDiskElementDescriptor( 0, (int) keyFile.length()
 246  
                 - IndexedDisk.RECORD_HEADER ) );
 247  
 
 248  
             if ( keys != null )
 249  
             {
 250  
                 if ( log.isDebugEnabled() )
 251  
                 {
 252  
                     log.debug( logCacheName + "Found " + keys.size() + " in keys file." );
 253  
                 }
 254  
 
 255  
                 keyHash.putAll( keys );
 256  
 
 257  
                 if ( log.isInfoEnabled() )
 258  
                 {
 259  
                     log.info( logCacheName + "Loaded keys from [" + fileName + "], key count: " + keyHash.size()
 260  
                         + "; up to " + maxKeySize + " will be available." );
 261  
                 }
 262  
             }
 263  
 
 264  
             if ( log.isDebugEnabled() )
 265  
             {
 266  
                 dump( false );
 267  
             }
 268  
         }
 269  
         catch ( Exception e )
 270  
         {
 271  
             log.error( logCacheName + "Problem loading keys for file " + fileName, e );
 272  
         }
 273  
         finally
 274  
         {
 275  
             storageLock.writeLock().release();
 276  
         }
 277  
     }
 278  
 
 279  
     /**
 280  
     * Check for minimal consistency between the keys and the data file. Makes sure no starting
 281  
      * positions in the keys exceed the file length.
 282  
      * <p>
 283  
      * The caller should take the appropriate action if the keys and data are not consistent.
 284  
      * @param checkForDedOverlaps if <code>true</code>, do a more thorough check by checking for
 285  
      *            data overlap
 286  
      * @return <code>true</code> if the test passes
 287  
      */
 288  
    private boolean checkKeyDataConsistency( boolean checkForDedOverlaps )
 289  
     {
 290  
         ElapsedTimer timer = new ElapsedTimer();
 291  
         log.debug( logCacheName + "Performing inital consistency check" );
 292  
 
 293  
         boolean isOk = true;
 294  
         long fileLength = 0;
 295  
         try
 296  
         {
 297  
             fileLength = dataFile.length();
 298  
 
 299  
             Iterator itr = keyHash.entrySet().iterator();
 300  
             while ( itr.hasNext() )
 301  
             {
 302  
                 Map.Entry e = (Map.Entry) itr.next();
 303  
                 IndexedDiskElementDescriptor ded = (IndexedDiskElementDescriptor) e.getValue();
 304  
 
 305  
                 isOk = ( ded.pos + IndexedDisk.RECORD_HEADER + ded.len <= fileLength );
 306  
 
 307  
                 if ( !isOk )
 308  
                 {
 309  
                     log.warn( logCacheName + "The dataFile is corrupted!" + "\n raf.length() = " + fileLength
 310  
                         + "\n ded.pos = " + ded.pos );
 311  
                     break;
 312  
                 }
 313  
             }
 314  
 
 315  
             if ( isOk && checkForDedOverlaps )
 316  
             {
 317  
                 isOk = checkForDedOverlaps( createPositionSortedDescriptorList() );
 318  
             }
 319  
         }
 320  
         catch ( Exception e )
 321  
         {
 322  
             log.error( e );
 323  
             isOk = false;
 324  
         }
 325  
 
 326  
         if ( log.isInfoEnabled() )
 327  
         {
 328  
             log.info( logCacheName + "Finished inital consistency check, isOk = " + isOk + " in "
 329  
                 + timer.getElapsedTimeString() );
 330  
         }
 331  
 
 332  
         return isOk;
 333  
     }
 334  
 
 335  
     /**
 336  
      * Detects any overlapping elements. This expects a sorted list.
 337  
      * <p>
 338  
      * The total length of an item is IndexedDisk.RECORD_HEADER + ded.len.
 339  
      * <p>
 340  
      * @param sortedDescriptors
 341  
      * @return false if there are overlaps.
 342  
      */
 343  
     protected boolean checkForDedOverlaps( IndexedDiskElementDescriptor[] sortedDescriptors )
 344  
     {
 345  
         long start = System.currentTimeMillis();
 346  
         boolean isOk = true;
 347  
         long expectedNextPos = 0;
 348  
         for ( int i = 0; i < sortedDescriptors.length; i++ )
 349  
         {
 350  
             IndexedDiskElementDescriptor ded = sortedDescriptors[i];
 351  
             if ( expectedNextPos > ded.pos )
 352  
             {
 353  
                 log.error( logCacheName + "Corrupt file: overlapping deds " + ded );
 354  
                 isOk = false;
 355  
                 break;
 356  
             }
 357  
             else
 358  
             {
 359  
                 expectedNextPos = ded.pos + IndexedDisk.RECORD_HEADER + ded.len;
 360  
             }
 361  
         }
 362  
         long end = System.currentTimeMillis();
 363  
         if ( log.isDebugEnabled() )
 364  
         {
 365  
             log.debug( logCacheName + "Check for DED overlaps took " + ( end - start ) + " ms." );
 366  
         }
 367  
 
 368  
         return isOk;
 369  
     }
 370  
 
 371  
     /**
 372  
     * Saves the key file to disk. This converts the LRUMap to a HashMap for serialization.
 373  
      */
 374  
     protected void saveKeys()
 375  
     {
 376  
         try
 377  
         {
 378  
             if ( log.isDebugEnabled() )
 379  
             {
 380  
                 log.debug( logCacheName + "Saving keys to: " + fileName + ", key count: " + keyHash.size() );
 381  
             }
 382  
 
 383  
             keyFile.reset();
 384  
 
 385  
             HashMap keys = new HashMap();
 386  
             keys.putAll( keyHash );
 387  
 
 388  
             if ( keys.size() > 0 )
 389  
             {
 390  
                 keyFile.writeObject( keys, 0 );
 391  
             }
 392  
 
 393  
             if ( log.isDebugEnabled() )
 394  
             {
 395  
                 log.debug( logCacheName + "Finished saving keys." );
 396  
             }
 397  
         }
 398  
         catch ( Exception e )
 399  
         {
 400  
             log.error( logCacheName + "Problem storing keys.", e );
 401  
         }
 402  
     }
 403  
 
 404  
     /**
 405  
     * Update the disk cache. Called from the Queue. Makes sure the item has not been retrieved from
 406  
      * purgatory while in queue for disk. Remove items from purgatory when they go to disk.
 407  
      * <p>
 408  
      * @param ce The ICacheElement to put to disk.
 409  
      */
 410  
     public void doUpdate( ICacheElement ce )
 411  
     {
 412  
         if ( !alive )
 413  
         {
 414  
             log.error( logCacheName + "No longer alive; aborting put of key = " + ce.getKey() );
 415  
             return;
 416  
         }
 417  
 
 418  
         if ( log.isDebugEnabled() )
 419  
         {
 420  
             log.debug( logCacheName + "Storing element on disk, key: " + ce.getKey() );
 421  
         }
 422  
 
 423  
         IndexedDiskElementDescriptor ded = null;
 424  
 
 425  
         // old element with same key
 426  
         IndexedDiskElementDescriptor old = null;
 427  
 
 428  
         try
 429  
         {
 430  
             byte[] data = IndexedDisk.serialize( ce );
 431  
 
 432  
             // make sure this only locks for one particular cache region
 433  
             storageLock.writeLock().acquire();
 434  
             try
 435  
             {
 436  
                 old = (IndexedDiskElementDescriptor) keyHash.get( ce.getKey() );
 437  
 
 438  
                 // Item with the same key already exists in file.
 439  
                 // Try to reuse the location if possible.
 440  
                 if ( old != null && data.length <= old.len )
 441  
                 {
 442  
                     // Reuse the old ded. The defrag relies on ded updates by reference, not
 443  
                     // replacement.
 444  
                     ded = old;
 445  
                     ded.len = data.length;
 446  
                 }
 447  
                 else
 448  
                 {
 449  
                     // we need this to compare in the recycle bin
 450  
                     ded = new IndexedDiskElementDescriptor( dataFile.length(), data.length );
 451  
 
 452  
                     if ( doRecycle )
 453  
                     {
 454  
                         IndexedDiskElementDescriptor rep = (IndexedDiskElementDescriptor) recycle
 455  
                             .takeNearestLargerOrEqual( ded );
 456  
                         if ( rep != null )
 457  
                         {
 458  
                             ded = rep;
 459  
                             ded.len = data.length;
 460  
                             recycleCnt++;
 461  
                             this.adjustBytesFree( ded, false );
 462  
                             if ( log.isDebugEnabled() )
 463  
                             {
 464  
                                 log.debug( logCacheName + "using recycled ded " + ded.pos + " rep.len = " + rep.len
 465  
                                     + " ded.len = " + ded.len );
 466  
                             }
 467  
                         }
 468  
                     }
 469  
 
 470  
                     // Put it in the map
 471  
                     keyHash.put( ce.getKey(), ded );
 472  
 
 473  
                     if ( queueInput )
 474  
                     {
 475  
                         queuedPutList.add( ded );
 476  
                         if ( log.isDebugEnabled() )
 477  
                         {
 478  
                             log.debug( logCacheName + "added to queued put list." + queuedPutList.size() );
 479  
                         }
 480  
                     }
 481  
                 }
 482  
 
 483  
                 dataFile.write( ded, data );
 484  
             }
 485  
             finally
 486  
             {
 487  
                 storageLock.writeLock().release();
 488  
             }
 489  
 
 490  
             if ( log.isDebugEnabled() )
 491  
             {
 492  
                 log.debug( logCacheName + "Put to file: " + fileName + ", key: " + ce.getKey() + ", position: "
 493  
                     + ded.pos + ", size: " + ded.len );
 494  
             }
 495  
         }
 496  
         catch ( ConcurrentModificationException cme )
 497  
         {
 498  
             // do nothing, this means it has gone back to memory mid
 499  
             // serialization
 500  
             if ( log.isDebugEnabled() )
 501  
             {
 502  
                 // this shouldn't be possible
 503  
                 log.debug( logCacheName + "Caught ConcurrentModificationException." + cme );
 504  
             }
 505  
         }
 506  
         catch ( Exception e )
 507  
         {
 508  
             log.error( logCacheName + "Failure updating element, key: " + ce.getKey() + " old: " + old, e );
 509  
         }
 510  
     }
 511  
 
 512  
     /**
 513  
      * @param key
 514  
      * @return ICacheElement or null
 515  
      * @see AbstractDiskCache#doGet
 516  
      */
 517  
     protected ICacheElement doGet( Serializable key )
 518  
     {
 519  
         if ( !alive )
 520  
         {
 521  
             log.error( logCacheName + "No longer alive so returning null for key = " + key );
 522  
 
 523  
             return null;
 524  
         }
 525  
 
 526  
         if ( log.isDebugEnabled() )
 527  
         {
 528  
             log.debug( logCacheName + "Trying to get from disk: " + key );
 529  
         }
 530  
 
 531  
         ICacheElement object = null;
 532  
         try
 533  
         {
 534  
             storageLock.readLock().acquire();
 535  
             try
 536  
             {
 537  
                 object = readElement( key );
 538  
             }
 539  
             finally
 540  
             {
 541  
                 storageLock.readLock().release();
 542  
             }
 543  
 
 544  
             if ( object != null )
 545  
             {
 546  
                 incrementHitCount();
 547  
             }
 548  
         }
 549  
         catch ( IOException ioe )
 550  
         {
 551  
             log.error( logCacheName + "Failure getting from disk, key = " + key, ioe );
 552  
             reset();
 553  
         }
 554  
         catch ( Exception e )
 555  
         {
 556  
             log.error( logCacheName + "Failure getting from disk, key = " + key, e );
 557  
         }
 558  
 
 559  
         return object;
 560  
     }
 561  
 
 562  
     /**
 563  
      * Reads the item from disk.
 564  
      * <p>
 565  
      * @param key
 566  
      * @return ICacheElement
 567  
      * @throws IOException
 568  
      */
 569  
     private ICacheElement readElement( Serializable key )
 570  
         throws IOException
 571  
     {
 572  
         ICacheElement object = null;
 573  
 
 574  
         IndexedDiskElementDescriptor ded = (IndexedDiskElementDescriptor) keyHash.get( key );
 575  
 
 576  
         if ( ded != null )
 577  
         {
 578  
             if ( log.isDebugEnabled() )
 579  
             {
 580  
                 log.debug( logCacheName + "Found on disk, key: " + key );
 581  
             }
 582  
             try
 583  
             {
 584  
                 object = (ICacheElement) dataFile.readObject( ded );
 585  
             }
 586  
             catch ( IOException e )
 587  
             {
 588  
                 log.error( logCacheName + "IO Exception, Problem reading object from file", e );
 589  
                 throw e;
 590  
             }
 591  
             catch ( Exception e )
 592  
             {
 593  
                 log.error( logCacheName + "Exception, Problem reading object from file", e );
 594  
                 throw new IOException( logCacheName + "Problem reading object from disk. " + e.getMessage() );
 595  
             }
 596  
         }
 597  
 
 598  
         return object;
 599  
     }
 600  
 
 601  
     /**
 602  
      * Gets the group keys from the disk.
 603  
      * <p>
 604  
      * @see org.apache.jcs.auxiliary.AuxiliaryCache#getGroupKeys(java.lang.String)
 605  
      */
 606  
     public Set getGroupKeys( String groupName )
 607  
     {
 608  
         GroupId groupId = new GroupId( cacheName, groupName );
 609  
         HashSet keys = new HashSet();
 610  
         try
 611  
         {
 612  
             storageLock.readLock().acquire();
 613  
 
 614  
             for ( Iterator itr = keyHash.keySet().iterator(); itr.hasNext(); )
 615  
             {
 616  
                 // Map.Entry entry = (Map.Entry) itr.next();
 617  
                 // Object k = entry.getKey();
 618  
                 Object k = itr.next();
 619  
                 if ( k instanceof GroupAttrName && ( (GroupAttrName) k ).groupId.equals( groupId ) )
 620  
                 {
 621  
                     keys.add( ( (GroupAttrName) k ).attrName );
 622  
                 }
 623  
             }
 624  
         }
 625  
         catch ( Exception e )
 626  
         {
 627  
             log.error( logCacheName + "Failure getting from disk, group = " + groupName, e );
 628  
         }
 629  
         finally
 630  
         {
 631  
             storageLock.readLock().release();
 632  
         }
 633  
 
 634  
         return keys;
 635  
     }
 636  
 
 637  
     /**
 638  
     * Returns true if the removal was successful; or false if there is nothing to remove. The current
 639  
     * implementation always results in a disk orphan.
 640  
      * <p>
 641  
      * @return true if at least one item was removed.
 642  
      * @param key
 643  
      */
 644  
     public boolean doRemove( Serializable key )
 645  
     {
 646  
         if ( !alive )
 647  
         {
 648  
             log.error( logCacheName + "No longer alive so returning false for key = " + key );
 649  
             return false;
 650  
         }
 651  
 
 652  
         if ( key == null )
 653  
         {
 654  
             return false;
 655  
         }
 656  
 
 657  
         boolean reset = false;
 658  
         boolean removed = false;
 659  
         try
 660  
         {
 661  
             storageLock.writeLock().acquire();
 662  
 
 663  
             if ( key instanceof String && key.toString().endsWith( CacheConstants.NAME_COMPONENT_DELIMITER ) )
 664  
             {
 665  
                 removed = performPartialKeyRemoval( (String) key );
 666  
             }
 667  
             else if ( key instanceof GroupId )
 668  
             {
 669  
                 removed = performGroupRemoval( (GroupId) key );
 670  
             }
 671  
             else
 672  
             {
 673  
                 removed = performSingleKeyRemoval( key );
 674  
             }
 675  
         }
 676  
         catch ( Exception e )
 677  
         {
 678  
             log.error( logCacheName + "Problem removing element.", e );
 679  
             reset = true;
 680  
         }
 681  
         finally
 682  
         {
 683  
             storageLock.writeLock().release();
 684  
         }
 685  
 
 686  
         if ( reset )
 687  
         {
 688  
             reset();
 689  
         }
 690  
 
 691  
         // this increments the removecount.
 692  
         // there is no reason to call this if an item was not removed.
 693  
         if ( removed )
 694  
         {
 695  
             doOptimizeRealTime();
 696  
         }
 697  
 
 698  
         return removed;
 699  
     }
 700  
 
 701  
     /**
 702  
     * Iterates over the key set. Builds a list of matches. Removes all the keys in the list. Does
 703  
      * not remove via the iterator, since the map impl may not support it.
 704  
      * <p>
 705  
      * This operates under a lock obtained in doRemove().
 706  
      * <p>
 707  
      * @param key
 708  
      * @return true if there was a match
 709  
      */
 710  
     private boolean performPartialKeyRemoval( String key )
 711  
     {
 712  
         boolean removed = false;
 713  
 
 714  
         // remove all keys of the same name hierarchy.
 715  
         List itemsToRemove = new LinkedList();
 716  
 
 717  
         Iterator iter = keyHash.entrySet().iterator();
 718  
         while ( iter.hasNext() )
 719  
         {
 720  
             Map.Entry entry = (Map.Entry) iter.next();
 721  
             Object k = entry.getKey();
 722  
             if ( k instanceof String && k.toString().startsWith( key.toString() ) )
 723  
             {
 724  
                 itemsToRemove.add( k );
 725  
             }
 726  
         }
 727  
 
 728  
         // remove matches.
 729  
         Iterator itToRemove = itemsToRemove.iterator();
 730  
         while ( itToRemove.hasNext() )
 731  
         {
 732  
             String fullKey = (String) itToRemove.next();
 733  
             IndexedDiskElementDescriptor ded = (IndexedDiskElementDescriptor) keyHash.get( fullKey );
 734  
             addToRecycleBin( ded );
 735  
             performSingleKeyRemoval( fullKey );
 736  
             removed = true;
 737  
             // TODO this needs to update the remove count separately
 738  
         }
 739  
 
 740  
         return removed;
 741  
     }
 742  
 
 743  
     /**
 744  
      * Remove all elements from the group. This does not use the iterator to remove. It builds a
 745  
     * list of group elements and then removes them one by one.
 746  
      * <p>
 747  
      * This operates under a lock obtained in doRemove().
 748  
      * <p>
 749  
      * @param key
 750  
      * @return true if an element was removed
 751  
      */
 752  
     private boolean performGroupRemoval( GroupId key )
 753  
     {
 754  
         boolean removed = false;
 755  
 
 756  
         // remove all keys of the same name group.
 757  
         List itemsToRemove = new LinkedList();
 758  
 
 759  
         // remove all keys of the same name hierarchy.
 760  
         Iterator iter = keyHash.entrySet().iterator();
 761  
         while ( iter.hasNext() )
 762  
         {
 763  
             Map.Entry entry = (Map.Entry) iter.next();
 764  
             Object k = entry.getKey();
 765  
 
 766  
             if ( k instanceof GroupAttrName && ( (GroupAttrName) k ).groupId.equals( key ) )
 767  
             {
 768  
                 itemsToRemove.add( k );
 769  
             }
 770  
         }
 771  
 
 772  
         // remove matches.
 773  
         Iterator itToRemove = itemsToRemove.iterator();
 774  
         while ( itToRemove.hasNext() )
 775  
         {
 776  
             GroupAttrName keyToRemove = (GroupAttrName) itToRemove.next();
 777  
             IndexedDiskElementDescriptor ded = (IndexedDiskElementDescriptor) keyHash.get( keyToRemove );
 778  
             addToRecycleBin( ded );
 779  
             performSingleKeyRemoval( keyToRemove );
 780  
             removed = true;
 781  
         }
 782  
         return removed;
 783  
     }
 784  
 
 785  
     /**
 786  
      * Removes an individual key from the cache.
 787  
      * <p>
 788  
      * This operates under a lock obtained in doRemove().
 789  
      * <p>
 790  
      * @param key
 791  
      * @return true if an item was removed.
 792  
      */
 793  
     private boolean performSingleKeyRemoval( Serializable key )
 794  
     {
 795  
         boolean removed;
 796  
         // remove single item.
 797  
         IndexedDiskElementDescriptor ded = (IndexedDiskElementDescriptor) keyHash.remove( key );
 798  
         removed = ( ded != null );
 799  
         addToRecycleBin( ded );
 800  
 
 801  
         if ( log.isDebugEnabled() )
 802  
         {
 803  
             log.debug( logCacheName + "Disk removal: Removed from key hash, key [" + key + "] removed = " + removed );
 804  
         }
 805  
         return removed;
 806  
     }
 807  
 
 808  
     /**
 809  
     * Remove all the items from the disk cache by resetting everything.
 810  
      */
 811  
     public void doRemoveAll()
 812  
     {
 813  
         try
 814  
         {
 815  
             reset();
 816  
         }
 817  
         catch ( Exception e )
 818  
         {
 819  
             log.error( logCacheName + "Problem removing all.", e );
 820  
             reset();
 821  
         }
 822  
     }
 823  
 
 824  
     /**
 825  
     * Reset effectively clears the disk cache, creating new files, recycle bins, and key maps.
 826  
      * <p>
 827  
     * It can be used as a last resort to handle errors, to force a content update, or for removeAll.
 828  
      */
 829  
     private void reset()
 830  
     {
 831  
         if ( log.isWarnEnabled() )
 832  
         {
 833  
             log.warn( logCacheName + "Reseting cache" );
 834  
         }
 835  
 
 836  
         try
 837  
         {
 838  
             storageLock.writeLock().acquire();
 839  
 
 840  
             if ( dataFile != null )
 841  
             {
 842  
                 dataFile.close();
 843  
             }
 844  
             File dataFileTemp = new File( rafDir, fileName + ".data" );
 845  
             dataFileTemp.delete();
 846  
 
 847  
             if ( keyFile != null )
 848  
             {
 849  
                 keyFile.close();
 850  
             }
 851  
             File keyFileTemp = new File( rafDir, fileName + ".key" );
 852  
             keyFileTemp.delete();
 853  
 
 854  
            dataFile = new IndexedDisk( new File( rafDir, fileName + ".data" ) );
 855  
 
 856  
            keyFile = new IndexedDisk( new File( rafDir, fileName + ".key" ) );
 857  
 
 858  
             initRecycleBin();
 859  
 
 860  
             initKeyMap();
 861  
         }
 862  
         catch ( Exception e )
 863  
         {
 864  
             log.error( logCacheName + "Failure reseting state", e );
 865  
         }
 866  
         finally
 867  
         {
 868  
             storageLock.writeLock().release();
 869  
         }
 870  
     }
 871  
 
 872  
     /**
 873  
     * Initializes the recycle bin. Its maximum size is the configured MaxRecycleBinSize, or 0 if that
 874  
     * value is negative; there is currently no way to have an unlimited recycle bin.
 875  
      */
 876  
     private void initRecycleBin()
 877  
     {
 878  
         int recycleBinSize = cattr.getMaxRecycleBinSize() >= 0 ? cattr.getMaxRecycleBinSize() : 0;
 879  
         recycle = new SortedPreferentialArray( recycleBinSize );
 880  
         if ( log.isDebugEnabled() )
 881  
         {
 882  
             log.debug( logCacheName + "Set recycle max Size to MaxRecycleBinSize: '" + recycleBinSize + "'" );
 883  
         }
 884  
     }
 885  
 
 886  
     /**
 887  
      * Create the map for keys that contain the index position on disk.
 888  
      */
 889  
     private void initKeyMap()
 890  
     {
 891  
         keyHash = null;
 892  
         if ( maxKeySize >= 0 )
 893  
         {
 894  
             keyHash = new LRUMap( maxKeySize );
 895  
             if ( log.isInfoEnabled() )
 896  
             {
 897  
                 log.info( logCacheName + "Set maxKeySize to: '" + maxKeySize + "'" );
 898  
             }
 899  
         }
 900  
         else
 901  
         {
 902  
             // If no max size, use a plain map for memory and processing efficiency.
 903  
             keyHash = new HashMap();
 904  
             // keyHash = Collections.synchronizedMap( new HashMap() );
 905  
             if ( log.isInfoEnabled() )
 906  
             {
 907  
                 log.info( logCacheName + "Set maxKeySize to unlimited'" );
 908  
             }
 909  
         }
 910  
     }
 911  
 
 912  
     /**
 913  
      * Dispose of the disk cache in a background thread. Joins against this thread to put a cap on
 914  
      * the disposal time.
 915  
      * <p>
 916  
      * @todo make dispose window configurable.
 917  
      */
 918  
     public void doDispose()
 919  
     {
 920  
         Runnable disR = new Runnable()
 921  
         {
 922  
             public void run()
 923  
             {
 924  
                 disposeInternal();
 925  
             }
 926  
         };
 927  
         Thread t = new Thread( disR, "IndexedDiskCache-DisposalThread" );
 928  
         t.start();
 929  
         // wait up to 60 seconds for dispose and then quit if not done.
 930  
         try
 931  
         {
 932  
             t.join( 60 * 1000 );
 933  
         }
 934  
         catch ( InterruptedException ex )
 935  
         {
 936  
             log.error( logCacheName + "Interrupted while waiting for disposal thread to finish.", ex );
 937  
         }
 938  
     }
 939  
 
 940  
     /**
 941  
      * Internal method that handles the disposal.
 942  
      */
 943  
     private void disposeInternal()
 944  
     {
 945  
         if ( !alive )
 946  
         {
 947  
             log.error( logCacheName + "Not alive and dispose was called, filename: " + fileName );
 948  
             return;
 949  
         }
 950  
 
 951  
         // Prevents any interaction with the cache while we're shutting down.
 952  
         alive = false;
 953  
 
 954  
         Thread optimizationThread = currentOptimizationThread;
 955  
         if ( isRealTimeOptimizationEnabled && optimizationThread != null )
 956  
         {
 957  
             // Join with the current optimization thread.
 958  
             if ( log.isDebugEnabled() )
 959  
             {
 960  
                 log.debug( logCacheName + "In dispose, optimization already " + "in progress; waiting for completion." );
 961  
             }
 962  
             try
 963  
             {
 964  
                 optimizationThread.join();
 965  
             }
 966  
             catch ( InterruptedException e )
 967  
             {
 968  
                 log.error( logCacheName + "Unable to join current optimization thread.", e );
 969  
             }
 970  
         }
 971  
         else if ( isShutdownOptimizationEnabled && this.getBytesFree() > 0 )
 972  
         {
 973  
             optimizeFile();
 974  
         }
 975  
 
 976  
         saveKeys();
 977  
 
 978  
         try
 979  
         {
 980  
             if ( log.isDebugEnabled() )
 981  
             {
 982  
                 log.debug( logCacheName + "Closing files, base filename: " + fileName );
 983  
             }
 984  
             dataFile.close();
 985  
             dataFile = null;
 986  
             keyFile.close();
 987  
             keyFile = null;
 988  
         }
 989  
         catch ( IOException e )
 990  
         {
 991  
             log.error( logCacheName + "Failure closing files in dispose, filename: " + fileName, e );
 992  
         }
 993  
 
 994  
         if ( log.isInfoEnabled() )
 995  
         {
 996  
             log.info( logCacheName + "Shutdown complete." );
 997  
         }
 998  
     }
 999  
 
 1000  
     /**
 1001  
      * Add descriptor to recycle bin if it is not null. Adds the length of the item to the bytes
 1002  
      * free.
 1003  
      * <p>
 1004  
      * @param ded
 1005  
      */
 1006  
     private void addToRecycleBin( IndexedDiskElementDescriptor ded )
 1007  
     {
 1008  
         // reuse the spot
 1009  
         if ( ded != null )
 1010  
         {
 1011  
             this.adjustBytesFree( ded, true );
 1012  
 
 1013  
             if ( doRecycle )
 1014  
             {
 1015  
 
 1016  
                 recycle.add( ded );
 1017  
                 if ( log.isDebugEnabled() )
 1018  
                 {
 1019  
                     log.debug( logCacheName + "recycled ded" + ded );
 1020  
                 }
 1021  
 
 1022  
             }
 1023  
         }
 1024  
     }
 1025  
 
 1026  
     /**
 1027  
      * Performs the check for optimization, and if it is required, do it.
 1028  
      */
 1029  
     private void doOptimizeRealTime()
 1030  
     {
 1031  
         if ( isRealTimeOptimizationEnabled && !isOptimizing && ( removeCount++ >= cattr.getOptimizeAtRemoveCount() ) )
 1032  
         {
 1033  
             isOptimizing = true;
 1034  
 
 1035  
             if ( log.isInfoEnabled() )
 1036  
             {
 1037  
                 log.info( logCacheName + "Optimizing file. removeCount [" + removeCount + "] OptimizeAtRemoveCount ["
 1038  
                     + cattr.getOptimizeAtRemoveCount() + "]" );
 1039  
             }
 1040  
 
 1041  
             if ( currentOptimizationThread == null )
 1042  
             {
 1043  
                 try
 1044  
                 {
 1045  
                     storageLock.writeLock().acquire();
 1046  
                     if ( currentOptimizationThread == null )
 1047  
                     {
 1048  
                        currentOptimizationThread = new Thread( new Runnable()
 1049  
                         {
 1050  
                             public void run()
 1051  
                             {
 1052  
                                 optimizeFile();
 1053  
 
 1054  
                                 currentOptimizationThread = null;
 1055  
                             }
 1056  
                         }, "IndexedDiskCache-OptimizationThread" );
 1057  
                     }
 1058  
                 }
 1059  
                 catch ( InterruptedException e )
 1060  
                 {
 1061  
                     log.error( logCacheName + "Unable to aquire storage write lock.", e );
 1062  
                 }
 1063  
                 finally
 1064  
                 {
 1065  
                     storageLock.writeLock().release();
 1066  
                 }
 1067  
 
 1068  
                 if ( currentOptimizationThread != null )
 1069  
                 {
 1070  
                     currentOptimizationThread.start();
 1071  
                 }
 1072  
             }
 1073  
         }
 1074  
     }
 1075  
 
 1076  
     /**
 1077  
      * File optimization is handled by this method. It works as follows:
 1078  
      * <ol>
 1079  
     * <li>Shut down recycling and turn on queuing of puts.</li>
 1080  
      * <li>Take a snapshot of the current descriptors. If there are any removes, ignore them, as
 1081  
      * they will be compacted during the next optimization.</li>
 1082  
      * <li>Optimize the snapshot. For each descriptor:
 1083  
      * <ol>
 1084  
      * <li>Obtain the write-lock.</li>
 1085  
      * <li>Shift the element on the disk, in order to compact out the free space. </li>
 1086  
      * <li>Release the write-lock. This allows elements to still be accessible during optimization.</li>
 1087  
      * </ol>
 1088  
      * <li>Obtain the write-lock.</li>
 1089  
      * <li>All queued puts are made at the end of the file. Optimize these under a single
 1090  
      * write-lock.</li>
 1091  
      * <li>Truncate the file.</li>
 1092  
      * <li>Release the write-lock.</li>
 1093  
      * <li>Restore system to standard operation.</li>
 1094  
      * </ol>
 1095  
      */
 1096  
     protected void optimizeFile()
 1097  
     {
 1098  
         ElapsedTimer timer = new ElapsedTimer();
 1099  
         timesOptimized++;
 1100  
         if ( log.isInfoEnabled() )
 1101  
         {
 1102  
             log.info( logCacheName + "Beginning Optimization #" + timesOptimized );
 1103  
         }
 1104  
 
 1105  
         // CREATE SNAPSHOT
 1106  
         IndexedDiskElementDescriptor[] defragList = null;
 1107  
         try
 1108  
         {
 1109  
             storageLock.writeLock().acquire();
 1110  
             queueInput = true;
 1111  
             // shut off recycle while we're optimizing,
 1112  
             doRecycle = false;
 1113  
             defragList = createPositionSortedDescriptorList();
 1114  
            // Release only if acquired.
 1115  
             storageLock.writeLock().release();
 1116  
         }
 1117  
         catch ( InterruptedException e )
 1118  
         {
 1119  
             log.error( logCacheName + "Error setting up optimization.", e );
 1120  
             return;
 1121  
         }
 1122  
 
 1123  
         // Defrag the file outside of the write lock. This allows a move to be made,
 1124  
         // and yet have the element still accessible for reading or writing.
 1125  
         long expectedNextPos = defragFile( defragList, 0 );
 1126  
 
 1127  
         // ADD THE QUEUED ITEMS to the end and then truncate
 1128  
         try
 1129  
         {
 1130  
             storageLock.writeLock().acquire();
 1131  
 
 1132  
             if ( !queuedPutList.isEmpty() )
 1133  
             {
 1134  
                // This is perhaps unnecessary, but the list might not be as sorted as we think.
 1135  
                 defragList = new IndexedDiskElementDescriptor[queuedPutList.size()];
 1136  
                 queuedPutList.toArray( defragList );
 1137  
                 Arrays.sort( defragList, new PositionComparator() );
 1138  
 
 1139  
                 // pack them at the end
 1140  
                 expectedNextPos = defragFile( defragList, expectedNextPos );
 1141  
             }
 1142  
             // TRUNCATE THE FILE
 1143  
             dataFile.truncate( expectedNextPos );
 1144  
         }
 1145  
         catch ( Exception e )
 1146  
         {
 1147  
             log.error( logCacheName + "Error optimizing queued puts.", e );
 1148  
         }
 1149  
         finally
 1150  
         {
 1151  
             // RESTORE NORMAL OPERATION
 1152  
             removeCount = 0;
 1153  
             bytesFree = 0;
 1154  
             initRecycleBin();
 1155  
             queuedPutList.clear();
 1156  
             queueInput = false;
 1157  
             // turn recycle back on.
 1158  
             doRecycle = true;
 1159  
             isOptimizing = false;
 1160  
 
 1161  
             storageLock.writeLock().release();
 1162  
         }
 1163  
 
 1164  
         if ( log.isInfoEnabled() )
 1165  
         {
 1166  
             log.info( logCacheName + "Finished #" + timesOptimized + " Optimization took "
 1167  
                 + timer.getElapsedTimeString() );
 1168  
         }
 1169  
     }
 1170  
 
 1171  
     /**
 1172  
     * Defragments the file in place by compacting out the free space (i.e., moving records forward).
 1173  
      * If there were no gaps the resulting file would be the same size as the previous file. This
 1174  
      * must be supplied an ordered defragList.
 1175  
      * <p>
 1176  
      * @param defragList sorted list of descriptors for optimization
 1177  
      * @param startingPos the start position in the file
 1178  
      * @return this is the potential new file end
 1179  
      */
 1180  
    private long defragFile( IndexedDiskElementDescriptor[] defragList, long startingPos )
 1181  
     {
 1182  
         ElapsedTimer timer = new ElapsedTimer();
 1183  
         long preFileSize = 0;
 1184  
         long postFileSize = 0;
 1185  
         long expectedNextPos = 0;
 1186  
         try
 1187  
         {
 1188  
             preFileSize = this.dataFile.length();
 1189  
             // find the first gap in the disk and start defragging.
 1190  
             expectedNextPos = startingPos;
 1191  
             for ( int i = 0; i < defragList.length; i++ )
 1192  
             {
 1193  
                 storageLock.writeLock().acquire();
 1194  
                 try
 1195  
                 {
 1196  
                     if ( expectedNextPos != defragList[i].pos )
 1197  
                     {
 1198  
                         dataFile.move( defragList[i], expectedNextPos );
 1199  
                     }
 1200  
                     expectedNextPos = defragList[i].pos + IndexedDisk.RECORD_HEADER + defragList[i].len;
 1201  
                 }
 1202  
                 finally
 1203  
                 {
 1204  
                     storageLock.writeLock().release();
 1205  
                 }
 1206  
             }
 1207  
 
 1208  
             postFileSize = this.dataFile.length();
 1209  
 
 1210  
             // this is the potential new file end
 1211  
             return expectedNextPos;
 1212  
         }
 1213  
         catch ( IOException e )
 1214  
         {
 1215  
             log.error( logCacheName + "Error occurred during defragmentation.", e );
 1216  
         }
 1217  
         catch ( InterruptedException e )
 1218  
         {
 1219  
             log.error( logCacheName + "Threading problem", e );
 1220  
         }
 1221  
         finally
 1222  
         {
 1223  
             if ( log.isInfoEnabled() )
 1224  
             {
 1225  
                 log.info( logCacheName + "Defragmentation took " + timer.getElapsedTimeString()
 1226  
                     + ". File Size (before=" + preFileSize + ") (after=" + postFileSize + ") (truncating to "
 1227  
                     + expectedNextPos + ")" );
 1228  
             }
 1229  
         }
 1230  
 
 1231  
         return 0;
 1232  
     }
 1233  
 
 1234  
     /**
 1235  
      * Creates a snapshot of the IndexedDiskElementDescriptors in the keyHash and returns them
 1236  
      * sorted by position in the dataFile.
 1237  
      * <p>
 1238  
      * TODO fix values() method on the LRU map.
 1239  
      * <p>
 1240  
      * @return IndexedDiskElementDescriptor[]
 1241  
      */
 1242  
     private IndexedDiskElementDescriptor[] createPositionSortedDescriptorList()
 1243  
     {
 1244  
         IndexedDiskElementDescriptor[] defragList = new IndexedDiskElementDescriptor[keyHash.size()];
 1245  
         Iterator iterator = keyHash.entrySet().iterator();
 1246  
         for ( int i = 0; iterator.hasNext(); i++ )
 1247  
         {
 1248  
             Object next = iterator.next();
 1249  
             defragList[i] = (IndexedDiskElementDescriptor) ( (Map.Entry) next ).getValue();
 1250  
         }
 1251  
 
 1252  
         Arrays.sort( defragList, new PositionComparator() );
 1253  
 
 1254  
         return defragList;
 1255  
     }
 1256  
 
 1257  
     /**
 1258  
      * Returns the current cache size.
 1259  
      * <p>
 1260  
      * @return The size value
 1261  
      */
 1262  
     public int getSize()
 1263  
     {
 1264  
         return keyHash.size();
 1265  
     }
 1266  
 
 1267  
     /**
 1268  
     * Returns the size of the recycle bin in number of elements.
 1269  
      * <p>
 1270  
      * @return The number of items in the bin.
 1271  
      */
 1272  
     protected int getRecyleBinSize()
 1273  
     {
 1274  
         return this.recycle.size();
 1275  
     }
 1276  
 
 1277  
     /**
 1278  
      * Returns the number of times we have used spots from the recycle bin.
 1279  
      * <p>
 1280  
      * @return The number of spots used.
 1281  
      */
 1282  
     protected int getRecyleCount()
 1283  
     {
 1284  
         return this.recycleCnt;
 1285  
     }
 1286  
 
 1287  
     /**
 1288  
      * Returns the number of bytes that are free. When an item is removed, its length is recorded.
 1289  
     * When a spot is used from the recycle bin, the length of the item stored is recorded.
 1290  
      * <p>
 1291  
      * @return The number bytes free on the disk file.
 1292  
      */
 1293  
     protected synchronized long getBytesFree()
 1294  
     {
 1295  
         return this.bytesFree;
 1296  
     }
 1297  
 
 1298  
     /**
 1299  
     * To subtract, pass in false for add.
 1300  
      * <p>
 1301  
      * @param ded
 1302  
      * @param add
 1303  
      */
 1304  
     private synchronized void adjustBytesFree( IndexedDiskElementDescriptor ded, boolean add )
 1305  
     {
 1306  
         if ( ded != null )
 1307  
         {
 1308  
             int amount = ded.len + IndexedDisk.RECORD_HEADER;
 1309  
 
 1310  
             if ( add )
 1311  
             {
 1312  
                 this.bytesFree += amount;
 1313  
             }
 1314  
             else
 1315  
             {
 1316  
                 this.bytesFree -= amount;
 1317  
             }
 1318  
         }
 1319  
     }
 1320  
 
 1321  
     /**
 1322  
      * This is for debugging and testing.
 1323  
      * <p>
 1324  
      * @return the length of the data file.
 1325  
      * @throws IOException
 1326  
      */
 1327  
     protected long getDataFileSize()
 1328  
         throws IOException
 1329  
     {
 1330  
         long size = 0;
 1331  
 
 1332  
         try
 1333  
         {
 1334  
             storageLock.readLock().acquire();
 1335  
             if ( dataFile != null )
 1336  
             {
 1337  
                 size = dataFile.length();
 1338  
             }
 1339  
         }
 1340  
         catch ( InterruptedException e )
 1341  
         {
 1342  
             // nothing
 1343  
         }
 1344  
         finally
 1345  
         {
 1346  
             storageLock.readLock().release();
 1347  
         }
 1348  
         return size;
 1349  
     }
 1350  
 
 1351  
     /**
 1352  
     * For debugging. This dumps the values by default.
 1353  
      */
 1354  
     public void dump()
 1355  
     {
 1356  
         dump( true );
 1357  
     }
 1358  
 
 1359  
     /**
 1360  
      * For debugging.
 1361  
      * <p>
 1362  
      * @param dumpValues A boolean indicating if values should be dumped.
 1363  
      */
 1364  
     public void dump( boolean dumpValues )
 1365  
     {
 1366  
         if ( log.isDebugEnabled() )
 1367  
         {
 1368  
             log.debug( logCacheName + "[dump] Number of keys: " + keyHash.size() );
 1369  
 
 1370  
             Iterator itr = keyHash.entrySet().iterator();
 1371  
 
 1372  
             while ( itr.hasNext() )
 1373  
             {
 1374  
                 Map.Entry e = (Map.Entry) itr.next();
 1375  
                 Serializable key = (Serializable) e.getKey();
 1376  
                 IndexedDiskElementDescriptor ded = (IndexedDiskElementDescriptor) e.getValue();
 1377  
 
 1378  
                 log.debug( logCacheName + "[dump] Disk element, key: " + key + ", pos: " + ded.pos + ", ded.len"
 1379  
                     + ded.len + ( ( dumpValues ) ? ( ", val: " + get( key ) ) : "" ) );
 1380  
             }
 1381  
         }
 1382  
     }
 1383  
 
 1384  
     /**
 1385  
      * @return Returns the AuxiliaryCacheAttributes.
 1386  
      */
 1387  
     public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
 1388  
     {
 1389  
         return this.cattr;
 1390  
     }
 1391  
 
 1392  
     /**
 1393  
      * Increments the hit count in a thread safe manner.
 1394  
      */
 1395  
     private synchronized void incrementHitCount()
 1396  
     {
 1397  
         hitCount++;
 1398  
     }
 1399  
 
 1400  
     /**
 1401  
      * Gets basic stats for the disk cache.
 1402  
      * <p>
 1403  
      * @return String
 1404  
      */
 1405  
     public String getStats()
 1406  
     {
 1407  
         return getStatistics().toString();
 1408  
     }
 1409  
 
 1410  
     /**
 1411  
      * Returns info about the disk cache.
 1412  
      * <p>
 1413  
      * (non-Javadoc)
 1414  
      * @see org.apache.jcs.auxiliary.AuxiliaryCache#getStatistics()
 1415  
      */
 1416  
     public synchronized IStats getStatistics()
 1417  
     {
 1418  
         IStats stats = new Stats();
 1419  
         stats.setTypeName( "Indexed Disk Cache" );
 1420  
 
 1421  
         ArrayList elems = new ArrayList();
 1422  
 
 1423  
         IStatElement se = null;
 1424  
 
 1425  
         se = new StatElement();
 1426  
         se.setName( "Is Alive" );
 1427  
         se.setData( "" + alive );
 1428  
         elems.add( se );
 1429  
 
 1430  
         se = new StatElement();
 1431  
         se.setName( "Key Map Size" );
 1432  
         if ( this.keyHash != null )
 1433  
         {
 1434  
             se.setData( "" + this.keyHash.size() );
 1435  
         }
 1436  
         else
 1437  
         {
 1438  
             se.setData( "-1" );
 1439  
         }
 1440  
         elems.add( se );
 1441  
 
 1442  
         try
 1443  
         {
 1444  
             se = new StatElement();
 1445  
             se.setName( "Data File Length" );
 1446  
             if ( this.dataFile != null )
 1447  
             {
 1448  
                 se.setData( "" + this.dataFile.length() );
 1449  
             }
 1450  
             else
 1451  
             {
 1452  
                 se.setData( "-1" );
 1453  
             }
 1454  
             elems.add( se );
 1455  
         }
 1456  
         catch ( Exception e )
 1457  
         {
 1458  
             log.error( e );
 1459  
         }
 1460  
 
 1461  
         se = new StatElement();
 1462  
         se.setName( "Hit Count" );
 1463  
         se.setData( "" + this.hitCount );
 1464  
         elems.add( se );
 1465  
 
 1466  
         se = new StatElement();
 1467  
         se.setName( "Bytes Free" );
 1468  
         se.setData( "" + this.bytesFree );
 1469  
         elems.add( se );
 1470  
 
 1471  
         se = new StatElement();
 1472  
         se.setName( "Optimize Operation Count" );
 1473  
         se.setData( "" + this.removeCount );
 1474  
         elems.add( se );
 1475  
 
 1476  
         se = new StatElement();
 1477  
         se.setName( "Times Optimized" );
 1478  
         se.setData( "" + this.timesOptimized );
 1479  
         elems.add( se );
 1480  
 
 1481  
         se = new StatElement();
 1482  
         se.setName( "Recycle Count" );
 1483  
         se.setData( "" + this.recycleCnt );
 1484  
         elems.add( se );
 1485  
 
 1486  
         se = new StatElement();
 1487  
         se.setName( "Recycle Bin Size" );
 1488  
         se.setData( "" + this.recycle.size() );
 1489  
         elems.add( se );
 1490  
 
 1491  
         se = new StatElement();
 1492  
         se.setName( "Startup Size" );
 1493  
         se.setData( "" + this.startupSize );
 1494  
         elems.add( se );
 1495  
 
 1496  
         // get the stats from the super too
 1497  
         // get as array, convert to list, add list to our outer list
 1498  
         IStats sStats = super.getStatistics();
 1499  
         IStatElement[] sSEs = sStats.getStatElements();
 1500  
         List sL = Arrays.asList( sSEs );
 1501  
         elems.addAll( sL );
 1502  
 
 1503  
         // get an array and put them in the Stats object
 1504  
         IStatElement[] ses = (IStatElement[]) elems.toArray( new StatElement[0] );
 1505  
         stats.setStatElements( ses );
 1506  
 
 1507  
         return stats;
 1508  
     }
 1509  
 
 1510  
     /**
 1511  
      * This is exposed for testing.
 1512  
      * <p>
 1513  
      * @return Returns the timesOptimized.
 1514  
      */
 1515  
     protected int getTimesOptimized()
 1516  
     {
 1517  
         return timesOptimized;
 1518  
     }
 1519  
 
 1520  
     /**
 1521  
      * Compares IndexedDiskElementDescriptor based on their position.
 1522  
      * <p>
 1523  
      */
 1524  
     private static final class PositionComparator
 1525  
         implements Comparator
 1526  
     {
 1527  
         /**
 1528  
          * Compares two descriptors based on position.
 1529  
          * <p>
 1530  
          * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
 1531  
          */
 1532  
         public int compare( Object o1, Object o2 )
 1533  
         {
 1534  
             IndexedDiskElementDescriptor ded1 = (IndexedDiskElementDescriptor) o1;
 1535  
             IndexedDiskElementDescriptor ded2 = (IndexedDiskElementDescriptor) o2;
 1536  
 
 1537  
             if ( ded1.pos < ded2.pos )
 1538  
             {
 1539  
                 return -1;
 1540  
             }
 1541  
             else if ( ded1.pos == ded2.pos )
 1542  
             {
 1543  
                 return 0;
 1544  
             }
 1545  
             else
 1546  
             {
 1547  
                 return 1;
 1548  
             }
 1549  
         }
 1550  
     }
 1551  
 
 1552  
     /**
 1553  
     * Class for recycling and LRU. This implements the LRU overflow callback, so we can add items to
 1554  
      * the recycle bin.
 1555  
      */
 1556  
     public class LRUMap
 1557  
         extends LRUMapJCS
 1558  
     {
 1559  
         /** Don't change */
 1560  
         private static final long serialVersionUID = 4955079991472142198L;
 1561  
 
 1562  
         /**
 1563  
          * <code>tag</code> tells us which map we are working on.
 1564  
          */
 1565  
         public String tag = "orig";
 1566  
 
 1567  
         /**
 1568  
          * Default
 1569  
          */
 1570  
         public LRUMap()
 1571  
         {
 1572  
             super();
 1573  
         }
 1574  
 
 1575  
         /**
 1576  
          * @param maxKeySize
 1577  
          */
 1578  
         public LRUMap( int maxKeySize )
 1579  
         {
 1580  
             super( maxKeySize );
 1581  
         }
 1582  
 
 1583  
         /**
 1584  
         * This is called when the max key size is reached. The least recently used item will be
 1585  
          * passed here. We will store the position and size of the spot on disk in the recycle bin.
 1586  
          * <p>
 1587  
          * @param key
 1588  
          * @param value
 1589  
          */
 1590  
         protected void processRemovedLRU( Object key, Object value )
 1591  
         {
 1592  
             addToRecycleBin( (IndexedDiskElementDescriptor) value );
 1593  
             if ( log.isDebugEnabled() )
 1594  
             {
 1595  
                 log.debug( logCacheName + "Removing key: [" + key + "] from key store." );
 1596  
                 log.debug( logCacheName + "Key store size: [" + this.size() + "]." );
 1597  
             }
 1598  
 
 1599  
             doOptimizeRealTime();
 1600  
         }
 1601  
     }
 1602  
 
 1603  
     /**
 1604  
     * Called on shutdown. This gives us a chance to store the keys and to optimize even if the
 1605  
      * cache manager's shutdown method was not called.
 1606  
      */
 1607  311
     class ShutdownHook
 1608  
         extends Thread
 1609  
     {
 1610  
         /**
 1611  
          * This will persist the keys on shutdown.
 1612  
          * <p>
 1613  
          * @see java.lang.Thread#run()
 1614  
          */
 1615  
         public void run()
 1616  
         {
 1617  310
             if ( alive )
 1618  
             {
 1619  309
                 log.warn( logCacheName + "Disk cache not shutdown properly, shutting down now." );
 1620  311
                 doDispose();
 1621  
             }
 1622  288
         }
 1623  
     }
 1624  
 }

This report is generated by jcoverage, Maven, and the Maven JCoverage Plugin.