View Javadoc

1   package org.apache.jcs.auxiliary.disk.block;
2   
3   /*
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *   http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing,
15   * software distributed under the License is distributed on an
16   * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
17   * KIND, either express or implied.  See the License for the
18   * specific language governing permissions and limitations
19   * under the License.
20   */
21  
22  import java.io.File;
23  import java.io.IOException;
24  import java.io.Serializable;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.HashSet;
28  import java.util.Iterator;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.Set;
32  
33  import org.apache.commons.logging.Log;
34  import org.apache.commons.logging.LogFactory;
35  import org.apache.jcs.auxiliary.AuxiliaryCacheAttributes;
36  import org.apache.jcs.auxiliary.disk.AbstractDiskCache;
37  import org.apache.jcs.engine.CacheConstants;
38  import org.apache.jcs.engine.behavior.ICacheElement;
39  import org.apache.jcs.engine.control.group.GroupAttrName;
40  import org.apache.jcs.engine.control.group.GroupId;
41  import org.apache.jcs.engine.stats.StatElement;
42  import org.apache.jcs.engine.stats.Stats;
43  import org.apache.jcs.engine.stats.behavior.IStatElement;
44  import org.apache.jcs.engine.stats.behavior.IStats;
45  
46  import EDU.oswego.cs.dl.util.concurrent.WriterPreferenceReadWriteLock;
47  
/**
 * There is one BlockDiskCache per region. It manages the key and data store.
 * <p>
 * @author Aaron Smuts
 */
public class BlockDiskCache
    extends AbstractDiskCache
{
    /** Don't change */
    private static final long serialVersionUID = 1L;

    /** The logger. */
    private static final Log log = LogFactory.getLog( BlockDiskCache.class );

    /** The name to prefix all log messages with, identifying the region. */
    private final String logCacheName;

    /** Base name (the region name) used for the <code>*.data</code> file on disk. */
    private String fileName;

    /** The data access object; stores serialized elements in fixed-size blocks. */
    private BlockDisk dataFile;

    /** Attributes governing the behavior of the block disk cache. */
    private BlockDiskCacheAttributes blockDiskCacheAttributes;

    /** The root directory for keys and data. */
    private File rootDirectory;

    /** Stores, loads, and persists the keys; maps each key to its block numbers. */
    private BlockDiskKeyStore keyStore;

    /**
     * Use this lock to synchronize reads and writes to the underlying storage mechanism. We don't
     * need a reentrant lock, since we only lock one level.
     * <p>
     * NOTE(review): WriterPreferenceReadWriteLock is NOT reentrant -- acquiring it twice on the
     * same thread will deadlock, so no method holding it may call another method that takes it.
     */
    private WriterPreferenceReadWriteLock storageLock = new WriterPreferenceReadWriteLock();
87  
    /**
     * Constructs the BlockDisk after setting up the root directory.
     * <p>
     * On any initialization failure the error is logged and the cache is left with
     * <code>alive == false</code>, so subsequent operations become no-ops rather than throwing.
     * <p>
     * @param cacheAttributes governs the disk path, block size, and key store behavior
     */
    public BlockDiskCache( BlockDiskCacheAttributes cacheAttributes )
    {
        super( cacheAttributes );

        this.blockDiskCacheAttributes = cacheAttributes;
        this.logCacheName = "Region [" + getCacheName() + "] ";

        if ( log.isInfoEnabled() )
        {
            log.info( logCacheName + "Constructing BlockDiskCache with attributes " + cacheAttributes );
        }

        // The data file is named after the region: <diskPath>/<region>.data
        this.fileName = getCacheName();
        String rootDirName = cacheAttributes.getDiskPath();
        this.rootDirectory = new File( rootDirName );
        this.rootDirectory.mkdirs();

        if ( log.isInfoEnabled() )
        {
            log.info( logCacheName + "Cache file root directory: [" + rootDirName + "]" );
        }

        try
        {
            // A configured block size > 0 overrides the BlockDisk default.
            if ( this.blockDiskCacheAttributes.getBlockSizeBytes() > 0 )
            {
                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
                                               this.blockDiskCacheAttributes.getBlockSizeBytes() );
            }
            else
            {
                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ) );
            }

            keyStore = new BlockDiskKeyStore( this.blockDiskCacheAttributes, this );

            // Spot-check that the persisted keys resolve to readable data; if not, start fresh.
            boolean alright = verifyDisk();

            if ( keyStore.size() == 0 || !alright )
            {
                this.reset();
            }

            // Initialization finished successfully, so set alive to true.
            alive = true;
            if ( log.isInfoEnabled() )
            {
                log.info( logCacheName + "Block Disk Cache is alive." );
            }
        }
        catch ( Exception e )
        {
            log.error( logCacheName + "Failure initializing for fileName: " + fileName + " and root directory: "
                + rootDirName, e );
        }
        // Registered even if initialization failed; the hook checks 'alive' before disposing.
        ShutdownHook shutdownHook = new ShutdownHook();
        Runtime.getRuntime().addShutdownHook( shutdownHook );
    }
151 
152     /***
153      * We need to verify that the file on disk uses the same block size and that the file is the
154      * proper size.
155      * <p>
156      * @return true if it looks ok
157      */
158     protected boolean verifyDisk()
159     {
160         boolean alright = false;
161         // simply try to read a few. If it works, then the file is probably ok.
162         // TODO add more.
163         try
164         {
165             int maxToTest = 100;
166             int count = 0;
167             Set keySet = this.keyStore.entrySet();
168             Iterator it = keySet.iterator();
169             while ( it.hasNext() && count < maxToTest )
170             {
171                 count++;
172                 Map.Entry entry = (Map.Entry) it.next();
173                 Object data = this.dataFile.read( (int[]) entry.getValue() );
174                 if ( data == null )
175                 {
176                     throw new Exception( "Couldn't find data for key [" + entry.getKey() + "]" );
177                 }
178             }
179             alright = true;
180         }
181         catch ( Exception e )
182         {
183             log.warn( "Problem verifying disk.  Message [" + e.getMessage() + "]" );
184             alright = false;
185         }
186         return alright;
187     }
188 
189     /***
190      * This requires a full iteration through the keys.
191      * <p>
192      * (non-Javadoc)
193      * @see org.apache.jcs.auxiliary.disk.AbstractDiskCache#getGroupKeys(java.lang.String)
194      */
195     public Set getGroupKeys( String groupName )
196     {
197         GroupId groupId = new GroupId( cacheName, groupName );
198         HashSet keys = new HashSet();
199         try
200         {
201             storageLock.readLock().acquire();
202 
203             for ( Iterator itr = this.keyStore.keySet().iterator(); itr.hasNext(); )
204             {
205                 Object k = itr.next();
206                 if ( k instanceof GroupAttrName && ( (GroupAttrName) k ).groupId.equals( groupId ) )
207                 {
208                     keys.add( ( (GroupAttrName) k ).attrName );
209                 }
210             }
211         }
212         catch ( Exception e )
213         {
214             log.error( logCacheName + "Failure getting from disk, group = " + groupName, e );
215         }
216         finally
217         {
218             storageLock.readLock().release();
219         }
220 
221         return keys;
222     }
223 
224     /***
225      * Returns the number of keys.
226      * <p>
227      * (non-Javadoc)
228      * @see org.apache.jcs.auxiliary.disk.AbstractDiskCache#getSize()
229      */
230     public int getSize()
231     {
232         return this.keyStore.size();
233     }
234 
    /**
     * Gets the ICacheElement for the key if it is in the cache. The program flow is as follows:
     * <ol>
     * <li>Make sure the disk cache is alive.</li>
     * <li>Get a read lock.</li>
     * <li>See if the key is in the key store.</li>
     * <li>If we found a key, ask the BlockDisk for the object at the blocks.</li>
     * <li>Release the lock.</li>
     * </ol>
     * Returns null if the cache is not alive, the key is unknown, or a failure occurs. An
     * IOException while reading is treated as likely corruption and triggers a full reset().
     * <p>
     * @param key the cache key to look up
     * @return the deserialized element, or null
     * @see org.apache.jcs.auxiliary.disk.AbstractDiskCache#doGet(java.io.Serializable)
     */
    protected ICacheElement doGet( Serializable key )
    {
        if ( !alive )
        {
            if ( log.isDebugEnabled() )
            {
                log.debug( logCacheName + "No longer alive so returning null for key = " + key );
            }
            return null;
        }

        if ( log.isDebugEnabled() )
        {
            log.debug( logCacheName + "Trying to get from disk: " + key );
        }

        ICacheElement object = null;
        try
        {
            storageLock.readLock().acquire();
            try
            {
                // The key store maps the key to the block numbers that hold its data.
                int[] ded = this.keyStore.get( key );
                if ( ded != null )
                {
                    object = (ICacheElement) this.dataFile.read( ded );
                }
            }
            finally
            {
                // Released only here, after the acquire above succeeded.
                storageLock.readLock().release();
            }
        }
        catch ( IOException ioe )
        {
            log.error( logCacheName + "Failure getting from disk--IOException, key = " + key, ioe );
            // A read failure suggests a corrupt data file; start over with empty files.
            reset();
        }
        catch ( Exception e )
        {
            log.error( logCacheName + "Failure getting from disk, key = " + key, e );
        }

        return object;
    }
292 
    /**
     * Writes an element to disk. The program flow is as follows:
     * <ol>
     * <li>Acquire the write lock.</li>
     * <li>See if an item exists for this key.</li>
     * <li>If an item already exists, free its blocks for reuse.</li>
     * <li>Have the BlockDisk write the item.</li>
     * <li>Put the resulting block descriptor in the key map.</li>
     * <li>Release the write lock.</li>
     * </ol>
     * Does nothing if the cache is not alive. Failures are logged and swallowed.
     * <p>
     * @param element the element to persist
     * @see org.apache.jcs.auxiliary.disk.AbstractDiskCache#doUpdate(org.apache.jcs.engine.behavior.ICacheElement)
     */
    protected void doUpdate( ICacheElement element )
    {
        if ( !alive )
        {
            if ( log.isDebugEnabled() )
            {
                log.debug( logCacheName + "No longer alive; aborting put of key = " + element.getKey() );
            }
            return;
        }

        int[] old = null;
        try
        {
            // make sure this only locks for one particular cache region
            storageLock.writeLock().acquire();
            try
            {
                old = this.keyStore.get( element.getKey() );

                // Reclaim the blocks used by any previous value for this key.
                if ( old != null )
                {
                    this.dataFile.freeBlocks( old );
                }

                int[] blocks = this.dataFile.write( element );

                this.keyStore.put( element.getKey(), blocks );
            }
            finally
            {
                // Released only here, after the acquire above succeeded.
                storageLock.writeLock().release();
            }

            if ( log.isDebugEnabled() )
            {
                log.debug( logCacheName + "Put to file [" + fileName + "] key [" + element.getKey() + "]" );
            }
        }
        catch ( Exception e )
        {
            log.error( logCacheName + "Failure updating element, key: " + element.getKey() + " old: " + old, e );
        }
        if ( log.isDebugEnabled() )
        {
            log.debug( logCacheName + "Storing element on disk, key: " + element.getKey() );
        }
    }
354 
355     /***
356      * Returns true if the removal was succesful; or false if there is nothing to remove. Current
357      * implementation always result in a disk orphan.
358      * <p>
359      * (non-Javadoc)
360      * @see org.apache.jcs.auxiliary.disk.AbstractDiskCache#doRemove(java.io.Serializable)
361      */
362     protected boolean doRemove( Serializable key )
363     {
364         if ( !alive )
365         {
366             if ( log.isDebugEnabled() )
367             {
368                 log.debug( logCacheName + "No longer alive so returning false for key = " + key );
369             }
370             return false;
371         }
372 
373         boolean reset = false;
374         boolean removed = false;
375         try
376         {
377             storageLock.writeLock().acquire();
378 
379             if ( key instanceof String && key.toString().endsWith( CacheConstants.NAME_COMPONENT_DELIMITER ) )
380             {
381                 // remove all keys of the same name group.
382 
383                 Iterator iter = this.keyStore.entrySet().iterator();
384 
385                 while ( iter.hasNext() )
386                 {
387                     Map.Entry entry = (Map.Entry) iter.next();
388 
389                     Object k = entry.getKey();
390 
391                     if ( k instanceof String && k.toString().startsWith( key.toString() ) )
392                     {
393                         int[] ded = this.keyStore.get( key );
394                         this.dataFile.freeBlocks( ded );
395                         iter.remove();
396                         removed = true;
397                         // TODO this needs to update the rmove count separately
398                     }
399                 }
400             }
401             else if ( key instanceof GroupId )
402             {
403                 // remove all keys of the same name hierarchy.
404                 Iterator iter = this.keyStore.entrySet().iterator();
405                 while ( iter.hasNext() )
406                 {
407                     Map.Entry entry = (Map.Entry) iter.next();
408                     Object k = entry.getKey();
409 
410                     if ( k instanceof GroupAttrName && ( (GroupAttrName) k ).groupId.equals( key ) )
411                     {
412                         int[] ded = this.keyStore.get( key );
413                         this.dataFile.freeBlocks( ded );
414                         iter.remove();
415                         removed = true;
416                     }
417                 }
418             }
419             else
420             {
421                 // remove single item.
422                 int[] ded = this.keyStore.remove( key );
423                 removed = ( ded != null );
424                 if ( ded != null )
425                 {
426                     this.dataFile.freeBlocks( ded );
427                 }
428 
429                 if ( log.isDebugEnabled() )
430                 {
431                     log.debug( logCacheName + "Disk removal: Removed from key hash, key [" + key + "] removed = "
432                         + removed );
433                 }
434             }
435         }
436         catch ( Exception e )
437         {
438             log.error( logCacheName + "Problem removing element.", e );
439             reset = true;
440         }
441         finally
442         {
443             storageLock.writeLock().release();
444         }
445 
446         if ( reset )
447         {
448             reset();
449         }
450 
451         return removed;
452     }
453 
    /**
     * Resets the keyfile, the disk file, and the memory key map.
     * <p>
     * Note: reset() catches and logs its own exceptions, so the catch below is a last-resort
     * safety net; the second reset() is a best-effort retry.
     * <p>
     * @see org.apache.jcs.auxiliary.disk.AbstractDiskCache#doRemoveAll()
     */
    protected void doRemoveAll()
    {
        try
        {
            reset();
        }
        catch ( Exception e )
        {
            log.error( logCacheName + "Problem removing all.", e );
            reset();
        }
    }
472 
473     /***
474      * Dispose of the disk cache in a background thread. Joins against this thread to put a cap on
475      * the disposal time.
476      * <p>
477      * @todo make dispose window configurable.
478      */
479     public void doDispose()
480     {
481         Runnable disR = new Runnable()
482         {
483             public void run()
484             {
485                 try
486                 {
487                     disposeInternal();
488                 }
489                 catch ( InterruptedException e )
490                 {
491                     log.warn( "Interrupted while diposing." );
492                 }
493             }
494         };
495         Thread t = new Thread( disR, "BlockDiskCache-DisposalThread" );
496         t.start();
497         // wait up to 60 seconds for dispose and then quit if not done.
498         try
499         {
500             t.join( 60 * 1000 );
501         }
502         catch ( InterruptedException ex )
503         {
504             log.error( logCacheName + "Interrupted while waiting for disposal thread to finish.", ex );
505         }
506     }
507 
    /**
     * Internal method that handles the disposal: takes the write lock, marks the cache dead,
     * persists the keys, and closes the data file.
     * <p>
     * @throws InterruptedException if interrupted while waiting for the write lock
     */
    private void disposeInternal()
        throws InterruptedException
    {
        if ( !alive )
        {
            log.error( logCacheName + "Not alive and dispose was called, filename: " + fileName );
            return;
        }
        // Acquired before the try, so the finally below never releases an unheld lock.
        storageLock.writeLock().acquire();
        try
        {
            // Prevents any interaction with the cache while we're shutting down.
            alive = false;

            // Persist the key-to-blocks map so the cache can be reloaded on restart.
            this.keyStore.saveKeys();

            try
            {
                if ( log.isDebugEnabled() )
                {
                    log.debug( logCacheName + "Closing files, base filename: " + fileName );
                }
                dataFile.close();
                // dataFile = null;

                // TOD make a close
                // keyFile.close();
                // keyFile = null;
            }
            catch ( IOException e )
            {
                log.error( logCacheName + "Failure closing files in dispose, filename: " + fileName, e );
            }
        }
        finally
        {
            storageLock.writeLock().release();
        }

        if ( log.isInfoEnabled() )
        {
            log.info( logCacheName + "Shutdown complete." );
        }
    }
556 
557     /***
558      * Returns the attributes.
559      * <p>
560      * (non-Javadoc)
561      * @see org.apache.jcs.auxiliary.AuxiliaryCache#getAuxiliaryCacheAttributes()
562      */
563     public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
564     {
565         return this.blockDiskCacheAttributes;
566     }
567 
568     /***
569      * Reset effectively clears the disk cache, creating new files, recyclebins, and keymaps.
570      * <p>
571      * It can be used to handle errors by last resort, force content update, or removeall.
572      */
573     private void reset()
574     {
575         if ( log.isWarnEnabled() )
576         {
577             log.warn( logCacheName + "Reseting cache" );
578         }
579 
580         try
581         {
582             storageLock.writeLock().acquire();
583 
584             if ( dataFile != null )
585             {
586                 dataFile.close();
587             }
588             // TODO have the BlockDisk do this itself
589             File dataFileTemp = new File( this.rootDirectory, fileName + ".data" );
590             dataFileTemp.delete();
591 
592             if ( this.blockDiskCacheAttributes.getBlockSizeBytes() > 0 )
593             {
594                 this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
595                                                this.blockDiskCacheAttributes.getBlockSizeBytes() );
596             }
597             else
598             {
599                 this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ) );
600             }
601 
602             this.keyStore.reset();
603         }
604         catch ( Exception e )
605         {
606             log.error( logCacheName + "Failure reseting state", e );
607         }
608         finally
609         {
610             storageLock.writeLock().release();
611         }
612     }
613 
614     /***
615      * Add these blocks to the emptyBlock list.
616      * <p>
617      * @param blocksToFree
618      */
619     protected void freeBlocks( int[] blocksToFree )
620     {
621         this.dataFile.freeBlocks( blocksToFree );
622     }
623 
    /**
     * Called on JVM shutdown. This gives us a chance to store the keys even if the cache
     * manager's shutdown method was not called.
     * <p>
     * Registered with the runtime in the constructor.
     */
    class ShutdownHook
        extends Thread
    {
        /** Disposes of the cache. This forces the keys to be persisted. */
        public void run()
        {
            // 'alive' is false after a normal dispose, making this hook a no-op then.
            if ( alive )
            {
                log.warn( logCacheName + "Disk cache not shutdown properly, shutting down now." );
                doDispose();
            }
        }
    }
641 
642     /***
643      * Gets basic stats for the disk cache.
644      * <p>
645      * @return String
646      */
647     public String getStats()
648     {
649         return getStatistics().toString();
650     }
651 
652     /***
653      * Returns info about the disk cache.
654      * <p>
655      * (non-Javadoc)
656      * @see org.apache.jcs.auxiliary.AuxiliaryCache#getStatistics()
657      */
658     public IStats getStatistics()
659     {
660         IStats stats = new Stats();
661         stats.setTypeName( "Block Disk Cache" );
662 
663         ArrayList elems = new ArrayList();
664 
665         IStatElement se = null;
666 
667         se = new StatElement();
668         se.setName( "Is Alive" );
669         se.setData( "" + alive );
670         elems.add( se );
671 
672         se = new StatElement();
673         se.setName( "Key Map Size" );
674         se.setData( "" + this.keyStore.size() );
675         elems.add( se );
676 
677         try
678         {
679             se = new StatElement();
680             se.setName( "Data File Length" );
681             if ( this.dataFile != null )
682             {
683                 se.setData( "" + this.dataFile.length() );
684             }
685             else
686             {
687                 se.setData( "-1" );
688             }
689             elems.add( se );
690         }
691         catch ( Exception e )
692         {
693             log.error( e );
694         }
695 
696         se = new StatElement();
697         se.setName( "Block Size Bytes" );
698         se.setData( "" + this.dataFile.getBlockSizeBytes() );
699         elems.add( se );
700 
701         se = new StatElement();
702         se.setName( "Number Of Blocks" );
703         se.setData( "" + this.dataFile.getNumberOfBlocks() );
704         elems.add( se );
705 
706         se = new StatElement();
707         se.setName( "Average Put Size Bytes" );
708         se.setData( "" + this.dataFile.getAveragePutSizeBytes() );
709         elems.add( se );
710 
711         se = new StatElement();
712         se.setName( "Empty Blocks" );
713         se.setData( "" + this.dataFile.getEmptyBlocks() );
714         elems.add( se );
715 
716         // get the stats from the super too
717         // get as array, convert to list, add list to our outer list
718         IStats sStats = super.getStatistics();
719         IStatElement[] sSEs = sStats.getStatElements();
720         List sL = Arrays.asList( sSEs );
721         elems.addAll( sL );
722 
723         // get an array and put them in the Stats object
724         IStatElement[] ses = (IStatElement[]) elems.toArray( new StatElement[0] );
725         stats.setStatElements( ses );
726 
727         return stats;
728     }
729 }