/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.store.kahadb.disk.page;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.TreeMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.zip.Adler32;
import java.util.zip.Checksum;

import org.apache.activemq.store.kahadb.disk.util.Sequence;
import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOExceptionSupport;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.util.IntrospectionSupport;
import org.apache.activemq.util.LFUCache;
import org.apache.activemq.util.LRUCache;
import org.apache.activemq.util.RecoverableRandomAccessFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A PageFile provides random access to fixed sized disk pages. This object is not thread safe and therefore access to it should
 * be externally synchronized.
 * <p/>
 * The file has 3 parts:
 * Metadata Space: 4k : Reserved metadata area. Used to store persistent config about the file.
 * Recovery Buffer Space: Page Size * 1000 : A redo log used to prevent partial page writes from making the file inconsistent.
 * Page Space: The pages in the page file.
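 * <p/>
 * A minimal usage sketch (the directory and name below are hypothetical, and
 * error handling is omitted):
 * <pre>{@code
 * PageFile pf = new PageFile(new File("target/data"), "index");
 * pf.load();                          // allocates OS resources; creates the files on first use
 * Transaction tx = pf.tx();
 * Page<String> page = tx.allocate();  // grab a free page
 * tx.commit();
 * pf.flush();                         // push buffered writes to disk
 * pf.unload();                        // release the file handles
 * }</pre>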
 */
public class PageFile {

    private static final String PAGEFILE_SUFFIX = ".data";
    private static final String RECOVERY_FILE_SUFFIX = ".redo";
    private static final String FREE_FILE_SUFFIX = ".free";

    // 4k Default page size.
    public static final int DEFAULT_PAGE_SIZE = Integer.getInteger("defaultPageSize", 1024*4);
    public static final int DEFAULT_WRITE_BATCH_SIZE = Integer.getInteger("defaultWriteBatchSize", 1000);
    public static final int DEFAULT_PAGE_CACHE_SIZE = Integer.getInteger("defaultPageCacheSize", 100);

    private static final int RECOVERY_FILE_HEADER_SIZE = 1024 * 4;
    private static final int PAGE_FILE_HEADER_SIZE = 1024 * 4;

    // Recovery header is (long offset)
    private static final Logger LOG = LoggerFactory.getLogger(PageFile.class);

    // A PageFile will use a couple of files in this directory
    private final File directory;
    // And the file names in that directory will be based on this name.
    private final String name;
    // File handle used for reading pages..
    private RecoverableRandomAccessFile readFile;
    // File handle used for writing pages..
    private RecoverableRandomAccessFile writeFile;
    // File handle used for writing to the recovery file..
    private RecoverableRandomAccessFile recoveryFile;

    // The size of pages
    private int pageSize = DEFAULT_PAGE_SIZE;

    // The minimum amount of space allocated to the recovery file, in pages.
    private int recoveryFileMinPageCount = 1000;
    // The max size that we let the recovery file grow to.. it may temporarily exceed the max, but the file
    // will get resized back down to this max size as soon as possible.
    private int recoveryFileMaxPageCount = 10000;
    // The number of pages in the current recovery buffer
    private int recoveryPageCount;

    private final AtomicBoolean loaded = new AtomicBoolean();
    // The number of pages we are aiming to write every time we
    // write to disk.
    int writeBatchSize = DEFAULT_WRITE_BATCH_SIZE;

    // The cache of recently used pages.
    private Map<Long, Page> pageCache;
    // Is reading pages from the cache enabled?
    private boolean enablePageCaching = true;
    // How many pages will we keep in the cache?
    private int pageCacheSize = DEFAULT_PAGE_CACHE_SIZE;

    // Should we first log the page write to the recovery buffer? Avoids partial
    // page write failures..
    private boolean enableRecoveryFile = true;
    // Will we sync writes to disk? Ensures that data will not be lost after a checkpoint().
    private boolean enableDiskSyncs = true;
    // Will writes be done in an async thread?
    private boolean enabledWriteThread = false;

    // These are used if enabledWriteThread == true
    private final AtomicBoolean stopWriter = new AtomicBoolean();
    private Thread writerThread;
    private CountDownLatch checkpointLatch;

    // Keeps track of writes that are being written to disk.
    private final TreeMap<Long, PageWrite> writes = new TreeMap<Long, PageWrite>();

    // The next page id to hand out when the free list is empty.
    private final AtomicLong nextFreePageId = new AtomicLong();
    // Keeps track of free pages.
    private SequenceSet freeList = new SequenceSet();

    private AtomicReference<SequenceSet> recoveredFreeList = new AtomicReference<SequenceSet>();
    private AtomicReference<SequenceSet> trackingFreeDuringRecovery = new AtomicReference<SequenceSet>();

    private final AtomicLong nextTxid = new AtomicLong();

    // Persistent settings stored in the page file.
    private MetaData metaData;

    private final ArrayList<File> tmpFilesForRemoval = new ArrayList<File>();

    private boolean useLFRUEviction = false;
    private float LFUEvictionFactor = 0.2f;
    /**
     * Used to keep track of updated pages which have not yet been committed.
     */
    static class PageWrite {
        Page page;
        byte[] current;
        byte[] diskBound;
        long currentLocation = -1;
        long diskBoundLocation = -1;
        File tmpFile;
        int length;

        public PageWrite(Page page, byte[] data) {
            this.page = page;
            current = data;
        }

        public PageWrite(Page page, long currentLocation, int length, File tmpFile) {
            this.page = page;
            this.currentLocation = currentLocation;
            this.tmpFile = tmpFile;
            this.length = length;
        }

        public void setCurrent(Page page, byte[] data) {
            this.page = page;
            current = data;
            currentLocation = -1;
            diskBoundLocation = -1;
        }

        public void setCurrentLocation(Page page, long location, int length) {
            this.page = page;
            this.currentLocation = location;
            this.length = length;
            this.current = null;
        }

        @Override
        public String toString() {
            return "[PageWrite:" + page.getPageId() + "-" + page.getType() + "]";
        }

        @SuppressWarnings("unchecked")
        public Page getPage() {
            return page;
        }

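        // For long transactions the disk bound copy lives in a tmp file rather than
        // in memory; getDiskBound() faults it back in lazily when the writer needs it.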
        public byte[] getDiskBound() throws IOException {
            if (diskBound == null && diskBoundLocation != -1) {
                diskBound = new byte[length];
                try(RandomAccessFile file = new RandomAccessFile(tmpFile, "r")) {
                    file.seek(diskBoundLocation);
                    file.read(diskBound);
                }
                diskBoundLocation = -1;
            }
            return diskBound;
        }

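        // begin() shifts the "current" buffer to the "diskBound" side, so callers
        // can keep updating the page while the previous version is being written out.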
        void begin() {
            if (currentLocation != -1) {
                diskBoundLocation = currentLocation;
            } else {
                diskBound = current;
            }
            current = null;
            currentLocation = -1;
        }

        /**
         * @return true if there are no pending writes to do.
         */
        boolean done() {
            diskBoundLocation = -1;
            diskBound = null;
            return current == null || currentLocation == -1;
        }

        boolean isDone() {
            return diskBound == null && diskBoundLocation == -1 && current == null && currentLocation == -1;
        }
    }

    /**
     * The MetaData object holds the persistent data associated with a PageFile object.
     */
    public static class MetaData {

        String fileType;
        String fileTypeVersion;

        long metaDataTxId = -1;
        int pageSize;
        boolean cleanShutdown;
        long lastTxId;
        long freePages;

        public String getFileType() {
            return fileType;
        }

        public void setFileType(String fileType) {
            this.fileType = fileType;
        }

        public String getFileTypeVersion() {
            return fileTypeVersion;
        }

        public void setFileTypeVersion(String version) {
            this.fileTypeVersion = version;
        }

        public long getMetaDataTxId() {
            return metaDataTxId;
        }

        public void setMetaDataTxId(long metaDataTxId) {
            this.metaDataTxId = metaDataTxId;
        }

        public int getPageSize() {
            return pageSize;
        }

        public void setPageSize(int pageSize) {
            this.pageSize = pageSize;
        }

        public boolean isCleanShutdown() {
            return cleanShutdown;
        }

        public void setCleanShutdown(boolean cleanShutdown) {
            this.cleanShutdown = cleanShutdown;
        }

        public long getLastTxId() {
            return lastTxId;
        }

        public void setLastTxId(long lastTxId) {
            this.lastTxId = lastTxId;
        }

        public long getFreePages() {
            return freePages;
        }

        public void setFreePages(long value) {
            this.freePages = value;
        }
    }

    public Transaction tx() {
        assertLoaded();
        return new Transaction(this);
    }

    /**
     * Creates a PageFile in the specified directory whose data files are named by name.
     */
    public PageFile(File directory, String name) {
        this.directory = directory;
        this.name = name;
    }

    /**
     * Deletes the files used by the PageFile object.  This method can only be used when this object is not loaded.
     *
     * @throws IOException           if the files cannot be deleted.
     * @throws IllegalStateException if this PageFile is loaded
     */
    public void delete() throws IOException {
        if (loaded.get()) {
            throw new IllegalStateException("Cannot delete page file data when the page file is loaded");
        }
        delete(getMainPageFile());
        delete(getFreeFile());
        delete(getRecoveryFile());
    }

    public void archive() throws IOException {
        if (loaded.get()) {
            throw new IllegalStateException("Cannot archive page file data when the page file is loaded");
        }
        long timestamp = System.currentTimeMillis();
        archive(getMainPageFile(), String.valueOf(timestamp));
        archive(getFreeFile(), String.valueOf(timestamp));
        archive(getRecoveryFile(), String.valueOf(timestamp));
    }

    /**
     * @param file
     * @throws IOException
     */
    private void delete(File file) throws IOException {
        if (file.exists() && !file.delete()) {
            throw new IOException("Could not delete: " + file.getPath());
        }
    }

    private void archive(File file, String suffix) throws IOException {
        if (file.exists()) {
            File archive = new File(file.getPath() + "-" + suffix);
            if (!file.renameTo(archive)) {
                throw new IOException("Could not archive: " + file.getPath() + " to " + archive.getPath());
            }
        }
    }

    /**
     * Loads the page file so that it can be accessed for read/write purposes.  This allocates OS resources.  If this is the
     * first time the page file is loaded, then this creates the page file in the file system.
     *
     * @throws IOException           If the page file cannot be loaded. This could be because the existing page file is corrupt,
     *                               is a bad version, or because there was a disk error.
     * @throws IllegalStateException If the page file was already loaded.
     */
    public void load() throws IOException, IllegalStateException {
        if (loaded.compareAndSet(false, true)) {

            if (enablePageCaching) {
                if (isUseLFRUEviction()) {
                    pageCache = Collections.synchronizedMap(new LFUCache<Long, Page>(pageCacheSize, getLFUEvictionFactor()));
                } else {
                    pageCache = Collections.synchronizedMap(new LRUCache<Long, Page>(pageCacheSize, pageCacheSize, 0.75f, true));
                }
            }

            File file = getMainPageFile();
            IOHelper.mkdirs(file.getParentFile());
            writeFile = new RecoverableRandomAccessFile(file, "rw", false);
            readFile = new RecoverableRandomAccessFile(file, "r");

            if (readFile.length() > 0) {
                // Load the page size setting because that can't change once the file is created.
                loadMetaData();
                pageSize = metaData.getPageSize();
            } else {
                // Store the page size setting because that can't change once the file is created.
                metaData = new MetaData();
                metaData.setFileType(PageFile.class.getName());
                metaData.setFileTypeVersion("1");
                metaData.setPageSize(getPageSize());
                metaData.setCleanShutdown(true);
                metaData.setFreePages(-1);
                metaData.setLastTxId(0);
                storeMetaData();
            }

            if (enableRecoveryFile) {
                recoveryFile = new RecoverableRandomAccessFile(getRecoveryFile(), "rw");
            }

            if (metaData.isCleanShutdown()) {
                nextTxid.set(metaData.getLastTxId() + 1);
                if (metaData.getFreePages() > 0) {
                    loadFreeList();
                }
            } else {
                LOG.debug(toString() + ", Recovering page file...");
                nextTxid.set(redoRecoveryUpdates());
                trackingFreeDuringRecovery.set(new SequenceSet());
            }

            if (writeFile.length() < PAGE_FILE_HEADER_SIZE) {
                writeFile.setLength(PAGE_FILE_HEADER_SIZE);
            }
            nextFreePageId.set((writeFile.length() - PAGE_FILE_HEADER_SIZE) / pageSize);

            metaData.setCleanShutdown(false);
            storeMetaData();
            getFreeFile().delete();
            startWriter();
            if (trackingFreeDuringRecovery.get() != null) {
                asyncFreePageRecovery(nextFreePageId.get());
            }
        } else {
            throw new IllegalStateException("Cannot load the page file when it is already loaded.");
        }
    }

    private void asyncFreePageRecovery(final long lastRecoveryPage) {
        Thread thread = new Thread("KahaDB Index Free Page Recovery") {
            @Override
            public void run() {
                try {
                    recoverFreePages(lastRecoveryPage);
                } catch (Throwable e) {
                    if (loaded.get()) {
                        LOG.warn("Error recovering index free page list", e);
                    }
                }
            }
        };
        thread.setPriority(Thread.NORM_PRIORITY);
        thread.setDaemon(true);
        thread.start();
    }

    private void recoverFreePages(final long lastRecoveryPage) throws Exception {
        LOG.info(toString() + ". Recovering pageFile free list due to prior unclean shutdown..");
        SequenceSet newFreePages = new SequenceSet();
        // need new pageFile instance to get unshared readFile
        PageFile recoveryPageFile = new PageFile(directory, name);
        recoveryPageFile.loadForRecovery(nextFreePageId.get());
        try {
            for (Iterator<Page> i = new Transaction(recoveryPageFile).iterator(true); i.hasNext(); ) {
                Page page = i.next();

                if (page.getPageId() >= lastRecoveryPage) {
                    break;
                }

                if (page.getType() == Page.PAGE_FREE_TYPE) {
                    newFreePages.add(page.getPageId());
                }
            }
        } finally {
            recoveryPageFile.readFile.close();
        }

        LOG.info(toString() + ". Recovered pageFile free list of size: " + newFreePages.rangeSize());
        if (!newFreePages.isEmpty()) {

            // allow flush (with index lock held) to merge eventually
            recoveredFreeList.lazySet(newFreePages);
        } else {
            // If there are no free pages, clear trackingFreeDuringRecovery to allow the broker to have a clean shutdown
            trackingFreeDuringRecovery.set(null);
        }
    }

    private void loadForRecovery(long nextFreePageIdSnap) throws Exception {
        loaded.set(true);
        enablePageCaching = false;
        File file = getMainPageFile();
        readFile = new RecoverableRandomAccessFile(file, "r");
        loadMetaData();
        pageSize = metaData.getPageSize();
        enableRecoveryFile = false;
        nextFreePageId.set(nextFreePageIdSnap);
    }


    /**
     * Unloads a previously loaded PageFile.  This deallocates OS related resources like file handles.
     * Once unloaded, you can no longer use the page file to read or write Pages.
     *
     * @throws IOException           if a disk error occurred while closing down the page file.
     * @throws IllegalStateException if the PageFile is not loaded
     */
    public void unload() throws IOException {
        if (loaded.compareAndSet(true, false)) {
            flush();
            try {
                stopWriter();
            } catch (InterruptedException e) {
                throw new InterruptedIOException();
            }

            if (freeList.isEmpty()) {
                metaData.setFreePages(0);
            } else {
                storeFreeList();
                metaData.setFreePages(freeList.size());
            }

            metaData.setLastTxId(nextTxid.get() - 1);
            if (trackingFreeDuringRecovery.get() != null) {
                // async recovery incomplete, will have to try again
                metaData.setCleanShutdown(false);
            } else {
                metaData.setCleanShutdown(true);
            }
            storeMetaData();

            if (readFile != null) {
                readFile.close();
                readFile = null;
                writeFile.close();
                writeFile = null;
                if (enableRecoveryFile) {
                    recoveryFile.close();
                    recoveryFile = null;
                }
                freeList.clear();
                if (pageCache != null) {
                    pageCache = null;
                }
                synchronized (writes) {
                    writes.clear();
                }
            }
        } else {
            throw new IllegalStateException("Cannot unload the page file when it is not loaded");
        }
    }

    public boolean isLoaded() {
        return loaded.get();
    }

    public boolean isCleanShutdown() {
        return metaData != null && metaData.isCleanShutdown();
    }

    public void allowIOResumption() {
        loaded.set(true);
    }

    /**
     * Flush and sync all write buffers to disk.
     *
     * @throws IOException If a disk error occurred.
     */
    public void flush() throws IOException {

        if (enabledWriteThread && stopWriter.get()) {
            throw new IOException("Page file already stopped: checkpointing is not allowed");
        }

        SequenceSet recovered = recoveredFreeList.get();
        if (recovered != null) {
            recoveredFreeList.lazySet(null);
            SequenceSet inUse = trackingFreeDuringRecovery.get();
            recovered.remove(inUse);
            freeList.merge(recovered);

            // all set for clean shutdown
            trackingFreeDuringRecovery.set(null);
            inUse.clear();
        }

        // Setup a latch that gets notified when all buffered writes hit the disk.
        CountDownLatch checkpointLatch;
        synchronized (writes) {
            if (writes.isEmpty()) {
                return;
            }
            if (enabledWriteThread) {
                if (this.checkpointLatch == null) {
                    this.checkpointLatch = new CountDownLatch(1);
                }
                checkpointLatch = this.checkpointLatch;
                writes.notify();
            } else {
                writeBatch();
                return;
            }
        }
        try {
            checkpointLatch.await();
        } catch (InterruptedException e) {
            InterruptedIOException ioe = new InterruptedIOException();
            ioe.initCause(e);
            throw ioe;
        }
    }


    @Override
    public String toString() {
        return "Page File: " + getMainPageFile();
    }

    ///////////////////////////////////////////////////////////////////
    // Private Implementation Methods
    ///////////////////////////////////////////////////////////////////
    private File getMainPageFile() {
        return new File(directory, IOHelper.toFileSystemSafeName(name) + PAGEFILE_SUFFIX);
    }

    public File getFreeFile() {
        return new File(directory, IOHelper.toFileSystemSafeName(name) + FREE_FILE_SUFFIX);
    }

    public File getRecoveryFile() {
        return new File(directory, IOHelper.toFileSystemSafeName(name) + RECOVERY_FILE_SUFFIX);
    }

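    // Maps a logical page id to its byte offset in the main file: with the default
    // 4k page size, for example, page 3 lives at 4096 + (3 * 4096) = 16384.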
    public long toOffset(long pageId) {
        return PAGE_FILE_HEADER_SIZE + (pageId * pageSize);
    }

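    // The 4k header holds two 2k copies of the metadata properties (see
    // storeMetaData()); loadMetaData() falls back to the copy that is still intact
    // when a crash tore the other one mid-write.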
    private void loadMetaData() throws IOException {

        ByteArrayInputStream is;
        MetaData v1 = new MetaData();
        MetaData v2 = new MetaData();
        try {
            Properties p = new Properties();
            byte[] d = new byte[PAGE_FILE_HEADER_SIZE / 2];
            readFile.seek(0);
            readFile.readFully(d);
            is = new ByteArrayInputStream(d);
            p.load(is);
            IntrospectionSupport.setProperties(v1, p);
        } catch (IOException e) {
            v1 = null;
        }

        try {
            Properties p = new Properties();
            byte[] d = new byte[PAGE_FILE_HEADER_SIZE / 2];
            readFile.seek(PAGE_FILE_HEADER_SIZE / 2);
            readFile.readFully(d);
            is = new ByteArrayInputStream(d);
            p.load(is);
            IntrospectionSupport.setProperties(v2, p);
        } catch (IOException e) {
            v2 = null;
        }

        if (v1 == null && v2 == null) {
            throw new IOException("Could not load page file meta data");
        }

        if (v1 == null || v1.metaDataTxId < 0) {
            metaData = v2;
        } else if (v2 == null || v2.metaDataTxId < 0) {
            metaData = v1;
        } else if (v1.metaDataTxId == v2.metaDataTxId) {
            metaData = v1; // use the first since the 2nd could be a partial..
        } else {
            metaData = v2; // use the second because the first is probably a partial.
        }
    }

    private void storeMetaData() throws IOException {
        // Convert the metadata into a property format
        metaData.metaDataTxId++;
        Properties p = new Properties();
        IntrospectionSupport.getProperties(metaData, p, null);

        ByteArrayOutputStream os = new ByteArrayOutputStream(PAGE_FILE_HEADER_SIZE);
        p.store(os, "");
        if (os.size() > PAGE_FILE_HEADER_SIZE / 2) {
            throw new IOException("Configuration is larger than: " + PAGE_FILE_HEADER_SIZE / 2);
        }
        // Fill the rest with space...
        byte[] filler = new byte[(PAGE_FILE_HEADER_SIZE / 2) - os.size()];
        Arrays.fill(filler, (byte) ' ');
        os.write(filler);
        os.flush();

        byte[] d = os.toByteArray();

        // So we don't lose it.. write it 2 times...
        writeFile.seek(0);
        writeFile.write(d);
        writeFile.sync();
        writeFile.seek(PAGE_FILE_HEADER_SIZE / 2);
        writeFile.write(d);
        writeFile.sync();
    }

    private void storeFreeList() throws IOException {
        FileOutputStream os = new FileOutputStream(getFreeFile());
        DataOutputStream dos = new DataOutputStream(os);
        SequenceSet.Marshaller.INSTANCE.writePayload(freeList, dos);
        dos.close();
    }

    private void loadFreeList() throws IOException {
        freeList.clear();
        FileInputStream is = new FileInputStream(getFreeFile());
        DataInputStream dis = new DataInputStream(is);
        freeList = SequenceSet.Marshaller.INSTANCE.readPayload(dis);
        dis.close();
    }

    ///////////////////////////////////////////////////////////////////
    // Property Accessors
    ///////////////////////////////////////////////////////////////////

    /**
     * Is the recovery buffer used to double buffer page writes?  Enabled by default.
     *
     * @return is the recovery buffer enabled.
     */
    public boolean isEnableRecoveryFile() {
        return enableRecoveryFile;
    }

    /**
     * Sets whether the recovery buffer is used to double buffer page writes.  Enabled by default.  Disabling this
     * may potentially cause partial page writes which can lead to page file corruption.
     */
    public void setEnableRecoveryFile(boolean doubleBuffer) {
        assertNotLoaded();
        this.enableRecoveryFile = doubleBuffer;
    }

    /**
     * @return Are page writes synced to disk?
     */
    public boolean isEnableDiskSyncs() {
        return enableDiskSyncs;
    }

    /**
     * Allows you to enable syncing writes to disk.
     */
    public void setEnableDiskSyncs(boolean syncWrites) {
        assertNotLoaded();
        this.enableDiskSyncs = syncWrites;
    }

    /**
     * @return the page size
     */
    public int getPageSize() {
        return this.pageSize;
    }

    /**
     * @return the amount of content data that a page can hold.
     */
    public int getPageContentSize() {
        return this.pageSize - Page.PAGE_HEADER_SIZE;
    }

    /**
     * Configures the page size used by the page file.  By default it is 4k.  Once a page file is created on disk,
     * subsequent loads of that file will use the original pageSize.  Once the PageFile is loaded, this setting
     * can no longer be changed.
     *
     * @param pageSize the pageSize to set
     * @throws IllegalStateException once the page file is loaded.
     */
    public void setPageSize(int pageSize) throws IllegalStateException {
        assertNotLoaded();
        this.pageSize = pageSize;
    }

    /**
     * @return true if read page caching is enabled
     */
    public boolean isEnablePageCaching() {
        return this.enablePageCaching;
    }

    /**
     * @param enablePageCaching allows you to enable read page caching
     */
    public void setEnablePageCaching(boolean enablePageCaching) {
        assertNotLoaded();
        this.enablePageCaching = enablePageCaching;
    }

    /**
     * @return the maximum number of pages that will get stored in the read page cache.
     */
    public int getPageCacheSize() {
        return this.pageCacheSize;
    }

    /**
     * @param pageCacheSize Sets the maximum number of pages that will get stored in the read page cache.
     */
    public void setPageCacheSize(int pageCacheSize) {
        assertNotLoaded();
        this.pageCacheSize = pageCacheSize;
    }

    public boolean isEnabledWriteThread() {
        return enabledWriteThread;
    }

    public void setEnableWriteThread(boolean enableAsyncWrites) {
        assertNotLoaded();
        this.enabledWriteThread = enableAsyncWrites;
    }

    public long getDiskSize() throws IOException {
        return toOffset(nextFreePageId.get());
    }

    public boolean isFreePage(long pageId) {
        return freeList.contains(pageId);
    }

    /**
     * @return the number of pages allocated in the PageFile
     */
    public long getPageCount() {
        return nextFreePageId.get();
    }

    public int getRecoveryFileMinPageCount() {
        return recoveryFileMinPageCount;
    }

    public long getFreePageCount() {
        assertLoaded();
        return freeList.rangeSize();
    }

    public void setRecoveryFileMinPageCount(int recoveryFileMinPageCount) {
        assertNotLoaded();
        this.recoveryFileMinPageCount = recoveryFileMinPageCount;
    }

    public int getRecoveryFileMaxPageCount() {
        return recoveryFileMaxPageCount;
    }

    public void setRecoveryFileMaxPageCount(int recoveryFileMaxPageCount) {
        assertNotLoaded();
        this.recoveryFileMaxPageCount = recoveryFileMaxPageCount;
    }

    public int getWriteBatchSize() {
        return writeBatchSize;
    }

    public void setWriteBatchSize(int writeBatchSize) {
        this.writeBatchSize = writeBatchSize;
    }

    public float getLFUEvictionFactor() {
        return LFUEvictionFactor;
    }

    public void setLFUEvictionFactor(float LFUEvictionFactor) {
        this.LFUEvictionFactor = LFUEvictionFactor;
    }

    public boolean isUseLFRUEviction() {
        return useLFRUEviction;
    }

    public void setUseLFRUEviction(boolean useLFRUEviction) {
        this.useLFRUEviction = useLFRUEviction;
    }

    ///////////////////////////////////////////////////////////////////
    // Package Protected Methods exposed to Transaction
    ///////////////////////////////////////////////////////////////////

    /**
     * @throws IllegalStateException if the page file is not loaded.
     */
    void assertLoaded() throws IllegalStateException {
        if (!loaded.get()) {
            throw new IllegalStateException("PageFile is not loaded");
        }
    }

    void assertNotLoaded() throws IllegalStateException {
        if (loaded.get()) {
            throw new IllegalStateException("PageFile is loaded");
        }
    }

    /**
     * Allocates a block of free pages that you can write data to.
     *
     * @param count the number of sequential pages to allocate
     * @return the first page of the sequential set.
     * @throws IOException           If a disk error occurred.
     * @throws IllegalStateException if the PageFile is not loaded
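     * <p/>
     * For example, {@code allocate(3)} hands back the first page of three
     * consecutive pages, taken from the free list when a long enough run exists,
     * or appended past the current end of the file otherwise.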
     */
    <T> Page<T> allocate(int count) throws IOException {
        assertLoaded();
        if (count <= 0) {
            throw new IllegalArgumentException("The allocation count must be larger than zero");
        }

        Sequence seq = freeList.removeFirstSequence(count);

        // We may need to create new free pages...
        if (seq == null) {

            Page<T> first = null;
            int c = count;

            // Reserve the page ids and write transaction ids only once....
            long pageId = nextFreePageId.getAndAdd(count);
            long writeTxnId = nextTxid.getAndAdd(count);

            while (c-- > 0) {
                Page<T> page = new Page<T>(pageId++);
                page.makeFree(writeTxnId++);

                if (first == null) {
                    first = page;
                }

                addToCache(page);
                DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageSize);
                page.write(out);
                write(page, out.getData());

                // LOG.debug("allocate writing: "+page.getPageId());
            }

            return first;
        }

        Page<T> page = new Page<T>(seq.getFirst());
        page.makeFree(0);
        // LOG.debug("allocated: "+page.getPageId());
        return page;
    }

    long getNextWriteTransactionId() {
        return nextTxid.incrementAndGet();
    }

    synchronized void readPage(long pageId, byte[] data) throws IOException {
        readFile.seek(toOffset(pageId));
        readFile.readFully(data);
    }

    public void freePage(long pageId) {
        freeList.add(pageId);
        removeFromCache(pageId);

        SequenceSet trackFreeDuringRecovery = trackingFreeDuringRecovery.get();
        if (trackFreeDuringRecovery != null) {
            trackFreeDuringRecovery.add(pageId);
        }
    }

    @SuppressWarnings("unchecked")
    private <T> void write(Page<T> page, byte[] data) throws IOException {
        final PageWrite write = new PageWrite(page, data);
        Entry<Long, PageWrite> entry = new Entry<Long, PageWrite>() {
            @Override
            public Long getKey() {
                return write.getPage().getPageId();
            }

            @Override
            public PageWrite getValue() {
                return write;
            }

            @Override
            public PageWrite setValue(PageWrite value) {
                return null;
            }
        };
        Entry<Long, PageWrite>[] entries = new Map.Entry[]{entry};
        write(Arrays.asList(entries));
    }

    void write(Collection<Map.Entry<Long, PageWrite>> updates) throws IOException {
        synchronized (writes) {
            if (enabledWriteThread) {
                while (writes.size() >= writeBatchSize && !stopWriter.get()) {
                    try {
                        writes.wait();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        throw new InterruptedIOException();
                    }
                }
            }

            boolean longTx = false;

            for (Map.Entry<Long, PageWrite> entry : updates) {
                Long key = entry.getKey();
                PageWrite value = entry.getValue();
                PageWrite write = writes.get(key);
                if (write == null) {
                    writes.put(key, value);
                } else {
                    if (value.currentLocation != -1) {
                        write.setCurrentLocation(value.page, value.currentLocation, value.length);
                        write.tmpFile = value.tmpFile;
                        longTx = true;
                    } else {
                        write.setCurrent(value.page, value.current);
                    }
                }
            }

            // Once we start approaching capacity, notify the writer to start writing.
            // Sync immediately for long txs.
            if (longTx || canStartWriteBatch()) {

                if (enabledWriteThread) {
                    writes.notify();
                } else {
                    writeBatch();
                }
            }
        }
    }

    private boolean canStartWriteBatch() {
        int capacityUsed = ((writes.size() * 100) / writeBatchSize);
        if (enabledWriteThread) {
            // The constant 10 here controls how soon write batches start going to disk..
            // it would be nice to figure out how to auto tune that value.  Made too small,
            // we reduce throughput because we lock the write mutex too often doing writes.
            return capacityUsed >= 10 || checkpointLatch != null;
        } else {
            return capacityUsed >= 80 || checkpointLatch != null;
        }
    }

    ///////////////////////////////////////////////////////////////////
    // Cache Related operations
    ///////////////////////////////////////////////////////////////////
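    // Lookups consult the in-flight write map before the read cache, so readers
    // always see the most recent version of a page even before it hits disk.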
    @SuppressWarnings("unchecked")
    <T> Page<T> getFromCache(long pageId) {
        synchronized (writes) {
            PageWrite pageWrite = writes.get(pageId);
            if (pageWrite != null) {
                return pageWrite.page;
            }
        }

        Page<T> result = null;
        if (enablePageCaching) {
            result = pageCache.get(pageId);
        }
        return result;
    }

    void addToCache(Page page) {
        if (enablePageCaching) {
            pageCache.put(page.getPageId(), page);
        }
    }

    void removeFromCache(long pageId) {
        if (enablePageCaching) {
            pageCache.remove(pageId);
        }
    }

    ///////////////////////////////////////////////////////////////////
    // Internal Double write implementation follows...
    ///////////////////////////////////////////////////////////////////

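    // A write batch lands in the recovery (redo) file first, together with a
    // checksum, and only then in the main page file. After an unclean shutdown
    // redoRecoveryUpdates() replays the last complete batch, so a partial write
    // to the main file can always be repaired.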
    private void pollWrites() {
        try {
            while (!stopWriter.get()) {
                // Wait for a notification...
                synchronized (writes) {
                    writes.notifyAll();

                    // If there is not enough to write, wait for a notification...
                    while (writes.isEmpty() && checkpointLatch == null && !stopWriter.get()) {
                        writes.wait(100);
                    }

                    if (writes.isEmpty()) {
                        releaseCheckpointWaiter();
                    }
                }
                writeBatch();
            }
        } catch (Throwable e) {
            LOG.info("An exception was raised while performing poll writes", e);
        } finally {
            releaseCheckpointWaiter();
        }
    }

    private void writeBatch() throws IOException {

        CountDownLatch checkpointLatch;
        ArrayList<PageWrite> batch;
        synchronized (writes) {
            batch = new ArrayList<PageWrite>(writes.size());
            // build a write batch from the current write cache.
            for (PageWrite write : writes.values()) {
                batch.add(write);
                // Move the current write to the diskBound write, this lets folks update the
                // page again without blocking for this write.
                write.begin();
                if (write.diskBound == null && write.diskBoundLocation == -1) {
                    batch.remove(write);
                }
            }

            // Grab on to the existing checkpoint latch because once we do this write we can
            // release the folks that were waiting for those writes to hit disk.
            checkpointLatch = this.checkpointLatch;
            this.checkpointLatch = null;
        }

        // First land the writes in the recovery file
        if (enableRecoveryFile) {
            Checksum checksum = new Adler32();

            recoveryFile.seek(RECOVERY_FILE_HEADER_SIZE);

            for (PageWrite w : batch) {
                try {
                    checksum.update(w.getDiskBound(), 0, pageSize);
                } catch (Throwable t) {
                    throw IOExceptionSupport.create("Cannot create recovery file. Reason: " + t, t);
                }
                recoveryFile.writeLong(w.page.getPageId());
                recoveryFile.write(w.getDiskBound(), 0, pageSize);
            }

            // Can we shrink the recovery buffer?
            if (recoveryPageCount > recoveryFileMaxPageCount) {
                int t = Math.max(recoveryFileMinPageCount, batch.size());
                recoveryFile.setLength(recoveryFileSizeForPages(t));
            }

            // Record the page writes in the recovery buffer.
            recoveryFile.seek(0);
            // Store the next tx id...
            recoveryFile.writeLong(nextTxid.get());
            // Store the checksum for the write batch so that on recovery we
            // know if we have a consistent write batch on disk.
            recoveryFile.writeLong(checksum.getValue());
            // Write the # of pages that will follow
            recoveryFile.writeInt(batch.size());

            if (enableDiskSyncs) {
                recoveryFile.sync();
            }
        }

        try {
            for (PageWrite w : batch) {
                writeFile.seek(toOffset(w.page.getPageId()));
                writeFile.write(w.getDiskBound(), 0, pageSize);
                w.done();
            }

            if (enableDiskSyncs) {
                writeFile.sync();
            }

        } catch (IOException ioError) {
            LOG.info("Unexpected io error on pagefile write of " + batch.size() + " pages.", ioError);
            // any subsequent write needs to be prefaced with a considered call to redoRecoveryUpdates
            // to ensure the disk image is self consistent
            loaded.set(false);
            throw ioError;
        } finally {
            synchronized (writes) {
                for (PageWrite w : batch) {
                    // If there are no more pending writes, then remove it from
                    // the write cache.
                    if (w.isDone()) {
                        writes.remove(w.page.getPageId());
                        if (w.tmpFile != null && tmpFilesForRemoval.contains(w.tmpFile)) {
                            if (!w.tmpFile.delete()) {
                                throw new IOException("Can't delete temporary KahaDB transaction file:" + w.tmpFile);
                            }
                            tmpFilesForRemoval.remove(w.tmpFile);
                        }
                    }
                }
            }

            if (checkpointLatch != null) {
                checkpointLatch.countDown();
            }
        }
    }

    public void removeTmpFile(File file) {
        tmpFilesForRemoval.add(file);
    }

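    // Each page in the recovery buffer is preceded by its 8 byte page id, hence
    // the (pageSize + 8) per-page footprint on top of the fixed header.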
    private long recoveryFileSizeForPages(int pageCount) {
        return RECOVERY_FILE_HEADER_SIZE + ((pageSize + 8) * pageCount);
    }

    private void releaseCheckpointWaiter() {
        if (checkpointLatch != null) {
            checkpointLatch.countDown();
            checkpointLatch = null;
        }
    }

    /**
     * Inspects the recovery buffer and re-applies any
     * partially applied page writes.
     *
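     * The on-disk layout of the buffer is: a fixed size header holding the next
     * transaction id (long), an Adler32 checksum over the batch (long) and the
     * page count (int), followed by one (page id, page data) record per page.
     *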
     * @return the next transaction id that can be used.
     */
    private long redoRecoveryUpdates() throws IOException {
        if (!enableRecoveryFile) {
            return 0;
        }
        recoveryPageCount = 0;

        // Are we initializing the recovery file?
        if (recoveryFile.length() == 0) {
            // Write an empty header..
            recoveryFile.write(new byte[RECOVERY_FILE_HEADER_SIZE]);
            // Preallocate the minimum size for better performance.
            recoveryFile.setLength(recoveryFileSizeForPages(recoveryFileMinPageCount));
            return 0;
        }

        // How many recovery pages do we have in the recovery buffer?
        recoveryFile.seek(0);
        long nextTxId = recoveryFile.readLong();
        long expectedChecksum = recoveryFile.readLong();
        int pageCounter = recoveryFile.readInt();

        recoveryFile.seek(RECOVERY_FILE_HEADER_SIZE);
        Checksum checksum = new Adler32();
        LinkedHashMap<Long, byte[]> batch = new LinkedHashMap<Long, byte[]>();
        try {
            for (int i = 0; i < pageCounter; i++) {
                long offset = recoveryFile.readLong();
                byte[] data = new byte[pageSize];
                if (recoveryFile.read(data, 0, pageSize) != pageSize) {
                    // Invalid recovery record, could not fully read the data. Probably due to a partial write to the recovery buffer.
                    return nextTxId;
                }
                checksum.update(data, 0, pageSize);
                batch.put(offset, data);
            }
        } catch (Exception e) {
            // If an error occurred it was because the redo buffer was not fully written out correctly.. so don't redo it,
            // as the pages should still be consistent.
            LOG.debug("Redo buffer was not fully intact: ", e);
            return nextTxId;
        }

        recoveryPageCount = pageCounter;

        // If the checksum is not valid then the recovery buffer was partially written to disk.
        if (checksum.getValue() != expectedChecksum) {
            return nextTxId;
        }

        // Re-apply all the writes in the recovery buffer.
        for (Map.Entry<Long, byte[]> e : batch.entrySet()) {
            writeFile.seek(toOffset(e.getKey()));
            writeFile.write(e.getValue());
        }

        // And sync it to disk
        writeFile.sync();
        return nextTxId;
    }

    private void startWriter() {
        synchronized (writes) {
            if (enabledWriteThread) {
                stopWriter.set(false);
                writerThread = new Thread("KahaDB Page Writer") {
                    @Override
                    public void run() {
                        pollWrites();
                    }
                };
                writerThread.setPriority(Thread.MAX_PRIORITY);
                writerThread.setDaemon(true);
                writerThread.start();
            }
        }
    }

    private void stopWriter() throws InterruptedException {
        if (enabledWriteThread) {
            stopWriter.set(true);
            writerThread.join();
        }
    }

    public File getFile() {
        return getMainPageFile();
    }

    public File getDirectory() {
        return directory;
    }
}