From 100c799b15f258c407fd2b52b750ad7000a62eb0 Mon Sep 17 00:00:00 2001
From: "harri.renney" <Renney77@gmail.com>
Date: Tue, 12 Feb 2019 15:29:34 +0000
Subject: [PATCH] File logging, adjustments.

---
 AudioFile.cpp          | 903 +++++++++++++++++++++++++++++++++++++++++
 AudioFile.h            | 181 +++++++++
 CPU_FDTD.hpp           |  15 +-
 Kernels/fdtdLocal.cl   |   2 +-
 OpenCL_FDTD.hpp        | 113 ++++--
 OpenGL_FDTD.hpp        |  29 +-
 Shaders/render_fs.glsl |   6 +-
 Wavetable Exciter.hpp  |   2 +-
 main.cpp               | 297 +++++++-------
 parameters.json        |  27 +-
 tinywav.c              | 258 ------------
 tinywav.h              | 129 ------
 12 files changed, 1359 insertions(+), 603 deletions(-)
 create mode 100644 AudioFile.cpp
 create mode 100644 AudioFile.h
 delete mode 100644 tinywav.c
 delete mode 100644 tinywav.h

diff --git a/AudioFile.cpp b/AudioFile.cpp
new file mode 100644
index 0000000..9e4b266
--- /dev/null
+++ b/AudioFile.cpp
@@ -0,0 +1,903 @@
+//=======================================================================
+/** @file AudioFile.cpp
+ *  @author Adam Stark
+ *  @copyright Copyright (C) 2017  Adam Stark
+ *
+ * This file is part of the 'AudioFile' library
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+//=======================================================================
+
+#include "AudioFile.h"
+#include <fstream>
+#include <unordered_map>
+#include <iterator>
+
+//=============================================================
+// Pre-defined 10-byte representations of common sample rates
+std::unordered_map <uint32_t, std::vector<uint8_t>> aiffSampleRateTable = {
+    {8000, {64, 11, 250, 0, 0, 0, 0, 0, 0, 0}},
+    {11025, {64, 12, 172, 68, 0, 0, 0, 0, 0, 0}},
+    {16000, {64, 12, 250, 0, 0, 0, 0, 0, 0, 0}},
+    {22050, {64, 13, 172, 68, 0, 0, 0, 0, 0, 0}},
+    {32000, {64, 13, 250, 0, 0, 0, 0, 0, 0, 0}},
+    {37800, {64, 14, 147, 168, 0, 0, 0, 0, 0, 0}},
+    {44056, {64, 14, 172, 24, 0, 0, 0, 0, 0, 0}},
+    {44100, {64, 14, 172, 68, 0, 0, 0, 0, 0, 0}},
+    {47250, {64, 14, 184, 146, 0, 0, 0, 0, 0, 0}},
+    {48000, {64, 14, 187, 128, 0, 0, 0, 0, 0, 0}},
+    {50000, {64, 14, 195, 80, 0, 0, 0, 0, 0, 0}},
+    {50400, {64, 14, 196, 224, 0, 0, 0, 0, 0, 0}},
+    {88200, {64, 15, 172, 68, 0, 0, 0, 0, 0, 0}},
+    {96000, {64, 15, 187, 128, 0, 0, 0, 0, 0, 0}},
+    {176400, {64, 16, 172, 68, 0, 0, 0, 0, 0, 0}},
+    {192000, {64, 16, 187, 128, 0, 0, 0, 0, 0, 0}},
+    {352800, {64, 17, 172, 68, 0, 0, 0, 0, 0, 0}},
+    {2822400, {64, 20, 172, 68, 0, 0, 0, 0, 0, 0}},
+    {5644800, {64, 21, 172, 68, 0, 0, 0, 0, 0, 0}}
+};
+
+//=============================================================
+template <class T>
+AudioFile<T>::AudioFile()
+{
+    bitDepth = 16;
+    sampleRate = 44100;
+    samples.resize (1);
+    samples[0].resize (0);
+    audioFileFormat = AudioFileFormat::NotLoaded;
+}
+
+//=============================================================
+template <class T>
+uint32_t AudioFile<T>::getSampleRate() const
+{
+    return sampleRate;
+}
+
+//=============================================================
+template <class T>
+int AudioFile<T>::getNumChannels() const
+{
+    return (int)samples.size();
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::isMono() const
+{
+    return getNumChannels() == 1;
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::isStereo() const
+{
+    return getNumChannels() == 2;
+}
+
+//=============================================================
+template <class T>
+int AudioFile<T>::getBitDepth() const
+{
+    return bitDepth;
+}
+
+//=============================================================
+template <class T>
+int AudioFile<T>::getNumSamplesPerChannel() const
+{
+    if (samples.size() > 0)
+        return (int) samples[0].size();
+    else
+        return 0;
+}
+
+//=============================================================
+template <class T>
+double AudioFile<T>::getLengthInSeconds() const
+{
+    return (double)getNumSamplesPerChannel() / (double)sampleRate;
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::printSummary() const
+{
+    std::cout << "|======================================|" << std::endl;
+    std::cout << "Num Channels: " << getNumChannels() << std::endl;
+    std::cout << "Num Samples Per Channel: " << getNumSamplesPerChannel() << std::endl;
+    std::cout << "Sample Rate: " << sampleRate << std::endl;
+    std::cout << "Bit Depth: " << bitDepth << std::endl;
+    std::cout << "Length in Seconds: " << getLengthInSeconds() << std::endl;
+    std::cout << "|======================================|" << std::endl;
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::setAudioBuffer (AudioBuffer& newBuffer)
+{
+    int numChannels = (int)newBuffer.size();
+    
+    if (numChannels <= 0)
+    {
+        assert (false && "The buffer you are trying to use has no channels");
+        return false;
+    }
+    
+    int numSamples = (int)newBuffer[0].size();
+    
+    // set the number of channels
+    samples.resize (newBuffer.size());
+    
+    for (int k = 0; k < getNumChannels(); k++)
+    {
+        assert (newBuffer[k].size() == numSamples);
+        
+        samples[k].resize (numSamples);
+        
+        for (int i = 0; i < numSamples; i++)
+        {
+            samples[k][i] = newBuffer[k][i];
+        }
+    }
+    
+    return true;
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::setAudioBufferSize (int numChannels, int numSamples)
+{
+    samples.resize (numChannels);
+    setNumSamplesPerChannel (numSamples);
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::setNumSamplesPerChannel (int numSamples)
+{
+    int originalSize = getNumSamplesPerChannel();
+    
+    for (int i = 0; i < getNumChannels();i++)
+    {
+        samples[i].resize (numSamples);
+        
+        // set any new samples to zero
+        if (numSamples > originalSize)
+            std::fill (samples[i].begin() + originalSize, samples[i].end(), (T)0.);
+    }
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::setNumChannels (int numChannels)
+{
+    int originalNumChannels = getNumChannels();
+    int originalNumSamplesPerChannel = getNumSamplesPerChannel();
+    
+    samples.resize (numChannels);
+    
+    // make sure any new channels are set to the right size
+    // and filled with zeros
+    if (numChannels > originalNumChannels)
+    {
+        for (int i = originalNumChannels; i < numChannels; i++)
+        {
+            samples[i].resize (originalNumSamplesPerChannel);
+            std::fill (samples[i].begin(), samples[i].end(), (T)0.);
+        }
+    }
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::setBitDepth (int numBitsPerSample)
+{
+    bitDepth = numBitsPerSample;
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::setSampleRate (uint32_t newSampleRate)
+{
+    sampleRate = newSampleRate;
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::load (std::string filePath)
+{
+    std::ifstream file (filePath, std::ios::binary);
+    
+    // check the file exists
+    if (! file.good())
+    {
+        std::cout << "ERROR: File doesn't exist or otherwise can't load file" << std::endl;
+        std::cout << filePath << std::endl;
+        return false;
+    }
+    
+    file.unsetf (std::ios::skipws);
+    std::istream_iterator<uint8_t> begin (file), end;
+    std::vector<uint8_t> fileData (begin, end);
+    
+    // get audio file format
+    audioFileFormat = determineAudioFileFormat (fileData);
+    
+    if (audioFileFormat == AudioFileFormat::Wave)
+    {
+        return decodeWaveFile (fileData);
+    }
+    else if (audioFileFormat == AudioFileFormat::Aiff)
+    {
+        return decodeAiffFile (fileData);
+    }
+    else
+    {
+        std::cout << "Audio File Type: " << "Error" << std::endl;
+        return false;
+    }
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::decodeWaveFile (std::vector<uint8_t>& fileData)
+{
+    // -----------------------------------------------------------
+    // HEADER CHUNK
+    std::string headerChunkID (fileData.begin(), fileData.begin() + 4);
+    //int32_t fileSizeInBytes = fourBytesToInt (fileData, 4) + 8;
+    std::string format (fileData.begin() + 8, fileData.begin() + 12);
+    
+    // -----------------------------------------------------------
+    // try and find the start points of key chunks
+    int indexOfDataChunk = getIndexOfString (fileData, "data");
+    int indexOfFormatChunk = getIndexOfString (fileData, "fmt");
+    
+    // if we can't find the data or format chunks, or the IDs/formats don't seem to be as expected
+    // then it is unlikely we'll able to read this file, so abort
+    if (indexOfDataChunk == -1 || indexOfFormatChunk == -1 || headerChunkID != "RIFF" || format != "WAVE")
+    {
+        std::cout << "ERROR: this doesn't seem to be a valid .WAV file" << std::endl;
+        return false;
+    }
+    
+    // -----------------------------------------------------------
+    // FORMAT CHUNK
+    int f = indexOfFormatChunk;
+    std::string formatChunkID (fileData.begin() + f, fileData.begin() + f + 4);
+    //int32_t formatChunkSize = fourBytesToInt (fileData, f + 4);
+    int16_t audioFormat = twoBytesToInt (fileData, f + 8);
+    int16_t numChannels = twoBytesToInt (fileData, f + 10);
+    sampleRate = (uint32_t) fourBytesToInt (fileData, f + 12);
+    int32_t numBytesPerSecond = fourBytesToInt (fileData, f + 16);
+    int16_t numBytesPerBlock = twoBytesToInt (fileData, f + 20);
+    bitDepth = (int) twoBytesToInt (fileData, f + 22);
+    
+    int numBytesPerSample = bitDepth / 8;
+    
+    // check that the audio format is PCM
+    if (audioFormat != 1)
+    {
+        std::cout << "ERROR: this is a compressed .WAV file and this library does not support decoding them at present" << std::endl;
+        return false;
+    }
+    
+    // check the number of channels is mono or stereo
+    if (numChannels < 1 ||numChannels > 2)
+    {
+        std::cout << "ERROR: this WAV file seems to be neither mono nor stereo (perhaps multi-track, or corrupted?)" << std::endl;
+        return false;
+    }
+    
+    // check header data is consistent
+    if ((numBytesPerSecond != (numChannels * sampleRate * bitDepth) / 8) || (numBytesPerBlock != (numChannels * numBytesPerSample)))
+    {
+        std::cout << "ERROR: the header data in this WAV file seems to be inconsistent" << std::endl;
+        return false;
+    }
+    
+    // check bit depth is either 8, 16 or 24 bit
+    if (bitDepth != 8 && bitDepth != 16 && bitDepth != 24)
+    {
+        std::cout << "ERROR: this file has a bit depth that is not 8, 16 or 24 bits" << std::endl;
+        return false;
+    }
+    
+    // -----------------------------------------------------------
+    // DATA CHUNK
+    int d = indexOfDataChunk;
+    std::string dataChunkID (fileData.begin() + d, fileData.begin() + d + 4);
+    int32_t dataChunkSize = fourBytesToInt (fileData, d + 4);
+    
+    int numSamples = dataChunkSize / (numChannels * bitDepth / 8);
+    int samplesStartIndex = indexOfDataChunk + 8;
+    
+    clearAudioBuffer();
+    samples.resize (numChannels);
+    
+    for (int i = 0; i < numSamples; i++)
+    {
+        for (int channel = 0; channel < numChannels; channel++)
+        {
+            int sampleIndex = samplesStartIndex + (numBytesPerBlock * i) + channel * numBytesPerSample;
+            
+            if (bitDepth == 8)
+            {
+                T sample = singleByteToSample (fileData[sampleIndex]);
+                samples[channel].push_back (sample);
+            }
+            else if (bitDepth == 16)
+            {
+                int16_t sampleAsInt = twoBytesToInt (fileData, sampleIndex);
+                T sample = sixteenBitIntToSample (sampleAsInt);
+                samples[channel].push_back (sample);
+            }
+            else if (bitDepth == 24)
+            {
+                int32_t sampleAsInt = 0;
+                sampleAsInt = (fileData[sampleIndex + 2] << 16) | (fileData[sampleIndex + 1] << 8) | fileData[sampleIndex];
+                
+                if (sampleAsInt & 0x800000) //  if the 24th bit is set, this is a negative number in 24-bit world
+                    sampleAsInt = sampleAsInt | ~0xFFFFFF; // so make sure sign is extended to the 32 bit float
+
+                T sample = (T)sampleAsInt / (T)8388608.;
+                samples[channel].push_back (sample);
+            }
+            else
+            {
+                assert (false);
+            }
+        }
+    }
+
+    return true;
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::decodeAiffFile (std::vector<uint8_t>& fileData)
+{
+    // -----------------------------------------------------------
+    // HEADER CHUNK
+    std::string headerChunkID (fileData.begin(), fileData.begin() + 4);
+    //int32_t fileSizeInBytes = fourBytesToInt (fileData, 4, Endianness::BigEndian) + 8;
+    std::string format (fileData.begin() + 8, fileData.begin() + 12);
+    
+    // -----------------------------------------------------------
+    // try and find the start points of key chunks
+    int indexOfCommChunk = getIndexOfString (fileData, "COMM");
+    int indexOfSoundDataChunk = getIndexOfString (fileData, "SSND");
+    
+    // if we can't find the data or format chunks, or the IDs/formats don't seem to be as expected
+    // then it is unlikely we'll able to read this file, so abort
+    if (indexOfSoundDataChunk == -1 || indexOfCommChunk == -1 || headerChunkID != "FORM" || format != "AIFF")
+    {
+        std::cout << "ERROR: this doesn't seem to be a valid AIFF file" << std::endl;
+        return false;
+    }
+
+    // -----------------------------------------------------------
+    // COMM CHUNK
+    int p = indexOfCommChunk;
+    std::string commChunkID (fileData.begin() + p, fileData.begin() + p + 4);
+    //int32_t commChunkSize = fourBytesToInt (fileData, p + 4, Endianness::BigEndian);
+    int16_t numChannels = twoBytesToInt (fileData, p + 8, Endianness::BigEndian);
+    int32_t numSamplesPerChannel = fourBytesToInt (fileData, p + 10, Endianness::BigEndian);
+    bitDepth = (int) twoBytesToInt (fileData, p + 14, Endianness::BigEndian);
+    sampleRate = getAiffSampleRate (fileData, p + 16);
+    
+    // check the sample rate was properly decoded
+    if (sampleRate == -1)
+    {
+        std::cout << "ERROR: this AIFF file has an unsupported sample rate" << std::endl;
+        return false;
+    }
+    
+    // check the number of channels is mono or stereo
+    if (numChannels < 1 ||numChannels > 2)
+    {
+        std::cout << "ERROR: this AIFF file seems to be neither mono nor stereo (perhaps multi-track, or corrupted?)" << std::endl;
+        return false;
+    }
+    
+    // check bit depth is either 8, 16 or 24 bit
+    if (bitDepth != 8 && bitDepth != 16 && bitDepth != 24)
+    {
+        std::cout << "ERROR: this file has a bit depth that is not 8, 16 or 24 bits" << std::endl;
+        return false;
+    }
+    
+    // -----------------------------------------------------------
+    // SSND CHUNK
+    int s = indexOfSoundDataChunk;
+    std::string soundDataChunkID (fileData.begin() + s, fileData.begin() + s + 4);
+    int32_t soundDataChunkSize = fourBytesToInt (fileData, s + 4, Endianness::BigEndian);
+    int32_t offset = fourBytesToInt (fileData, s + 8, Endianness::BigEndian);
+    //int32_t blockSize = fourBytesToInt (fileData, s + 12, Endianness::BigEndian);
+    
+    int numBytesPerSample = bitDepth / 8;
+    int numBytesPerFrame = numBytesPerSample * numChannels;
+    int totalNumAudioSampleBytes = numSamplesPerChannel * numBytesPerFrame;
+    int samplesStartIndex = s + 16 + (int)offset;
+        
+    // sanity check the data
+    if ((soundDataChunkSize - 8) != totalNumAudioSampleBytes || totalNumAudioSampleBytes > (fileData.size() - samplesStartIndex))
+    {
+        std::cout << "ERROR: the metadata for this file doesn't seem right" << std::endl;
+        return false;
+    }
+    
+    clearAudioBuffer();
+    samples.resize (numChannels);
+    
+    for (int i = 0; i < numSamplesPerChannel; i++)
+    {
+        for (int channel = 0; channel < numChannels; channel++)
+        {
+            int sampleIndex = samplesStartIndex + (numBytesPerFrame * i) + channel * numBytesPerSample;
+            
+            if (bitDepth == 8)
+            {
+                int8_t sampleAsSigned8Bit = (int8_t)fileData[sampleIndex];
+                T sample = (T)sampleAsSigned8Bit / (T)128.;
+                samples[channel].push_back (sample);
+            }
+            else if (bitDepth == 16)
+            {
+                int16_t sampleAsInt = twoBytesToInt (fileData, sampleIndex, Endianness::BigEndian);
+                T sample = sixteenBitIntToSample (sampleAsInt);
+                samples[channel].push_back (sample);
+            }
+            else if (bitDepth == 24)
+            {
+                int32_t sampleAsInt = 0;
+                sampleAsInt = (fileData[sampleIndex] << 16) | (fileData[sampleIndex + 1] << 8) | fileData[sampleIndex + 2];
+                
+                if (sampleAsInt & 0x800000) //  if the 24th bit is set, this is a negative number in 24-bit world
+                    sampleAsInt = sampleAsInt | ~0xFFFFFF; // so make sure sign is extended to the 32 bit float
+                
+                T sample = (T)sampleAsInt / (T)8388608.;
+                samples[channel].push_back (sample);
+            }
+            else
+            {
+                assert (false);
+            }
+        }
+    }
+    
+    return true;
+}
+
+//=============================================================
+template <class T>
+uint32_t AudioFile<T>::getAiffSampleRate (std::vector<uint8_t>& fileData, int sampleRateStartIndex)
+{
+    for (auto it : aiffSampleRateTable)
+    {
+        if (tenByteMatch (fileData, sampleRateStartIndex, it.second, 0))
+            return it.first;
+    }
+    
+    return -1;
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::tenByteMatch (std::vector<uint8_t>& v1, int startIndex1, std::vector<uint8_t>& v2, int startIndex2)
+{
+    for (int i = 0; i < 10; i++)
+    {
+        if (v1[startIndex1 + i] != v2[startIndex2 + i])
+            return false;
+    }
+    
+    return true;
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::addSampleRateToAiffData (std::vector<uint8_t>& fileData, uint32_t sampleRate)
+{
+    if (aiffSampleRateTable.count (sampleRate) > 0)
+    {
+        for (int i = 0; i < 10; i++)
+            fileData.push_back (aiffSampleRateTable[sampleRate][i]);
+    }
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::save (std::string filePath, AudioFileFormat format)
+{
+    if (format == AudioFileFormat::Wave)
+    {
+        return saveToWaveFile (filePath);
+    }
+    else if (format == AudioFileFormat::Aiff)
+    {
+        return saveToAiffFile (filePath);
+    }
+    
+    return false;
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::saveToWaveFile (std::string filePath)
+{
+    std::vector<uint8_t> fileData;
+    
+    int32_t dataChunkSize = getNumSamplesPerChannel() * (getNumChannels() * bitDepth / 8);
+    
+    // -----------------------------------------------------------
+    // HEADER CHUNK
+    addStringToFileData (fileData, "RIFF");
+    
+    // The file size in bytes is the header chunk size (4, not counting RIFF and WAVE) + the format
+    // chunk size (24) + the metadata part of the data chunk plus the actual data chunk size
+    int32_t fileSizeInBytes = 4 + 24 + 8 + dataChunkSize;
+    addInt32ToFileData (fileData, fileSizeInBytes);
+    
+    addStringToFileData (fileData, "WAVE");
+    
+    // -----------------------------------------------------------
+    // FORMAT CHUNK
+    addStringToFileData (fileData, "fmt ");
+    addInt32ToFileData (fileData, 16); // format chunk size (16 for PCM)
+    addInt16ToFileData (fileData, 1); // audio format = 1
+    addInt16ToFileData (fileData, (int16_t)getNumChannels()); // num channels
+    addInt32ToFileData (fileData, (int32_t)sampleRate); // sample rate
+    
+    int32_t numBytesPerSecond = (int32_t) ((getNumChannels() * sampleRate * bitDepth) / 8);
+    addInt32ToFileData (fileData, numBytesPerSecond);
+    
+    int16_t numBytesPerBlock = getNumChannels() * (bitDepth / 8);
+    addInt16ToFileData (fileData, numBytesPerBlock);
+    
+    addInt16ToFileData (fileData, (int16_t)bitDepth);
+    
+    // -----------------------------------------------------------
+    // DATA CHUNK
+    addStringToFileData (fileData, "data");
+    addInt32ToFileData (fileData, dataChunkSize);
+    
+    for (int i = 0; i < getNumSamplesPerChannel(); i++)
+    {
+        for (int channel = 0; channel < getNumChannels(); channel++)
+        {
+            if (bitDepth == 8)
+            {
+                uint8_t byte = sampleToSingleByte (samples[channel][i]);
+                fileData.push_back (byte);
+            }
+            else if (bitDepth == 16)
+            {
+                int16_t sampleAsInt = sampleToSixteenBitInt (samples[channel][i]);
+                addInt16ToFileData (fileData, sampleAsInt);
+            }
+            else if (bitDepth == 24)
+            {
+                int32_t sampleAsIntAgain = (int32_t) (samples[channel][i] * (T)8388608.);
+                
+                uint8_t bytes[3];
+                bytes[2] = (uint8_t) (sampleAsIntAgain >> 16) & 0xFF;
+                bytes[1] = (uint8_t) (sampleAsIntAgain >>  8) & 0xFF;
+                bytes[0] = (uint8_t) sampleAsIntAgain & 0xFF;
+                
+                fileData.push_back (bytes[0]);
+                fileData.push_back (bytes[1]);
+                fileData.push_back (bytes[2]);
+            }
+            else
+            {
+                assert (false && "Trying to write a file with unsupported bit depth");
+                return false;
+            }
+        }
+    }
+    
+    // check that the various sizes we put in the metadata are correct
+    if (fileSizeInBytes != (fileData.size() - 8) || dataChunkSize != (getNumSamplesPerChannel() * getNumChannels() * (bitDepth / 8)))
+    {
+        std::cout << "ERROR: couldn't save file to " << filePath << std::endl;
+        return false;
+    }
+    
+    // try to write the file
+    return writeDataToFile (fileData, filePath);
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::saveToAiffFile (std::string filePath)
+{
+    std::vector<uint8_t> fileData;
+    
+    int32_t numBytesPerSample = bitDepth / 8;
+    int32_t numBytesPerFrame = numBytesPerSample * getNumChannels();
+    int32_t totalNumAudioSampleBytes = getNumSamplesPerChannel() * numBytesPerFrame;
+    int32_t soundDataChunkSize = totalNumAudioSampleBytes + 8;
+    
+    // -----------------------------------------------------------
+    // HEADER CHUNK
+    addStringToFileData (fileData, "FORM");
+    
+    // The file size in bytes is the header chunk size (4, not counting FORM and AIFF) + the COMM
+    // chunk size (26) + the metadata part of the SSND chunk plus the actual data chunk size
+    int32_t fileSizeInBytes = 4 + 26 + 16 + totalNumAudioSampleBytes;
+    addInt32ToFileData (fileData, fileSizeInBytes, Endianness::BigEndian);
+    
+    addStringToFileData (fileData, "AIFF");
+    
+    // -----------------------------------------------------------
+    // COMM CHUNK
+    addStringToFileData (fileData, "COMM");
+    addInt32ToFileData (fileData, 18, Endianness::BigEndian); // commChunkSize
+    addInt16ToFileData (fileData, getNumChannels(), Endianness::BigEndian); // num channels
+    addInt32ToFileData (fileData, getNumSamplesPerChannel(), Endianness::BigEndian); // num samples per channel
+    addInt16ToFileData (fileData, bitDepth, Endianness::BigEndian); // bit depth
+    addSampleRateToAiffData (fileData, sampleRate);
+    
+    // -----------------------------------------------------------
+    // SSND CHUNK
+    addStringToFileData (fileData, "SSND");
+    addInt32ToFileData (fileData, soundDataChunkSize, Endianness::BigEndian);
+    addInt32ToFileData (fileData, 0, Endianness::BigEndian); // offset
+    addInt32ToFileData (fileData, 0, Endianness::BigEndian); // block size
+    
+    for (int i = 0; i < getNumSamplesPerChannel(); i++)
+    {
+        for (int channel = 0; channel < getNumChannels(); channel++)
+        {
+            if (bitDepth == 8)
+            {
+                uint8_t byte = sampleToSingleByte (samples[channel][i]);
+                fileData.push_back (byte);
+            }
+            else if (bitDepth == 16)
+            {
+                int16_t sampleAsInt = sampleToSixteenBitInt (samples[channel][i]);
+                addInt16ToFileData (fileData, sampleAsInt, Endianness::BigEndian);
+            }
+            else if (bitDepth == 24)
+            {
+                int32_t sampleAsIntAgain = (int32_t) (samples[channel][i] * (T)8388608.);
+                
+                uint8_t bytes[3];
+                bytes[0] = (uint8_t) (sampleAsIntAgain >> 16) & 0xFF;
+                bytes[1] = (uint8_t) (sampleAsIntAgain >>  8) & 0xFF;
+                bytes[2] = (uint8_t) sampleAsIntAgain & 0xFF;
+                
+                fileData.push_back (bytes[0]);
+                fileData.push_back (bytes[1]);
+                fileData.push_back (bytes[2]);
+            }
+            else
+            {
+                assert (false && "Trying to write a file with unsupported bit depth");
+                return false;
+            }
+        }
+    }
+    
+    // check that the various sizes we put in the metadata are correct
+    if (fileSizeInBytes != (fileData.size() - 8) || soundDataChunkSize != getNumSamplesPerChannel() *  numBytesPerFrame + 8)
+    {
+        std::cout << "ERROR: couldn't save file to " << filePath << std::endl;
+        return false;
+    }
+    
+    // try to write the file
+    return writeDataToFile (fileData, filePath);
+}
+
+//=============================================================
+template <class T>
+bool AudioFile<T>::writeDataToFile (std::vector<uint8_t>& fileData, std::string filePath)
+{
+    std::ofstream outputFile (filePath, std::ios::binary);
+    
+    if (outputFile.is_open())
+    {
+        for (int i = 0; i < fileData.size(); i++)
+        {
+            char value = (char) fileData[i];
+            outputFile.write (&value, sizeof (char));
+        }
+        
+        outputFile.close();
+        
+        return true;
+    }
+    
+    return false;
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::addStringToFileData (std::vector<uint8_t>& fileData, std::string s)
+{
+    for (int i = 0; i < s.length();i++)
+        fileData.push_back ((uint8_t) s[i]);
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::addInt32ToFileData (std::vector<uint8_t>& fileData, int32_t i, Endianness endianness)
+{
+    uint8_t bytes[4];
+    
+    if (endianness == Endianness::LittleEndian)
+    {
+        bytes[3] = (i >> 24) & 0xFF;
+        bytes[2] = (i >> 16) & 0xFF;
+        bytes[1] = (i >> 8) & 0xFF;
+        bytes[0] = i & 0xFF;
+    }
+    else
+    {
+        bytes[0] = (i >> 24) & 0xFF;
+        bytes[1] = (i >> 16) & 0xFF;
+        bytes[2] = (i >> 8) & 0xFF;
+        bytes[3] = i & 0xFF;
+    }
+    
+    for (int i = 0; i < 4; i++)
+        fileData.push_back (bytes[i]);
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::addInt16ToFileData (std::vector<uint8_t>& fileData, int16_t i, Endianness endianness)
+{
+    uint8_t bytes[2];
+    
+    if (endianness == Endianness::LittleEndian)
+    {
+        bytes[1] = (i >> 8) & 0xFF;
+        bytes[0] = i & 0xFF;
+    }
+    else
+    {
+        bytes[0] = (i >> 8) & 0xFF;
+        bytes[1] = i & 0xFF;
+    }
+    
+    fileData.push_back (bytes[0]);
+    fileData.push_back (bytes[1]);
+}
+
+//=============================================================
+template <class T>
+void AudioFile<T>::clearAudioBuffer()
+{
+    for (int i = 0; i < samples.size();i++)
+    {
+        samples[i].clear();
+    }
+    
+    samples.clear();
+}
+
+//=============================================================
+template <class T>
+AudioFileFormat AudioFile<T>::determineAudioFileFormat (std::vector<uint8_t>& fileData)
+{
+    std::string header (fileData.begin(), fileData.begin() + 4);
+    
+    if (header == "RIFF")
+        return AudioFileFormat::Wave;
+    else if (header == "FORM")
+        return AudioFileFormat::Aiff;
+    else
+        return AudioFileFormat::Error;
+}
+
+//=============================================================
+template <class T>
+int32_t AudioFile<T>::fourBytesToInt (std::vector<uint8_t>& source, int startIndex, Endianness endianness)
+{
+    int32_t result;
+    
+    if (endianness == Endianness::LittleEndian)
+        result = (source[startIndex + 3] << 24) | (source[startIndex + 2] << 16) | (source[startIndex + 1] << 8) | source[startIndex];
+    else
+        result = (source[startIndex] << 24) | (source[startIndex + 1] << 16) | (source[startIndex + 2] << 8) | source[startIndex + 3];
+    
+    return result;
+}
+
+//=============================================================
+template <class T>
+int16_t AudioFile<T>::twoBytesToInt (std::vector<uint8_t>& source, int startIndex, Endianness endianness)
+{
+    int16_t result;
+    
+    if (endianness == Endianness::LittleEndian)
+        result = (source[startIndex + 1] << 8) | source[startIndex];
+    else
+        result = (source[startIndex] << 8) | source[startIndex + 1];
+    
+    return result;
+}
+
+//=============================================================
+template <class T>
+int AudioFile<T>::getIndexOfString (std::vector<uint8_t>& source, std::string stringToSearchFor)
+{
+    int index = -1;
+    int stringLength = (int)stringToSearchFor.length();
+    
+    for (int i = 0; i < source.size() - stringLength;i++)
+    {
+        std::string section (source.begin() + i, source.begin() + i + stringLength);
+        
+        if (section == stringToSearchFor)
+        {
+            index = i;
+            break;
+        }
+    }
+    
+    return index;
+}
+
+//=============================================================
+template <class T>
+T AudioFile<T>::sixteenBitIntToSample (int16_t sample)
+{
+    return static_cast<T> (sample) / static_cast<T> (32768.);
+}
+
+//=============================================================
+template <class T>
+int16_t AudioFile<T>::sampleToSixteenBitInt (T sample)
+{
+    sample = clamp (sample, -1., 1.);
+    return static_cast<int16_t> (sample * 32767.);
+}
+
+//=============================================================
+template <class T>
+uint8_t AudioFile<T>::sampleToSingleByte (T sample)
+{
+    sample = clamp (sample, -1., 1.);
+    sample = (sample + 1.) / 2.;
+    return static_cast<uint8_t> (sample * 255.);
+}
+
+//=============================================================
+template <class T>
+T AudioFile<T>::singleByteToSample (uint8_t sample)
+{
+    return static_cast<T> (sample - 128) / static_cast<T> (128.);
+}
+
+#include <algorithm>
+//=============================================================
+template <class T>
+T AudioFile<T>::clamp (T value, T minValue, T maxValue)
+{
+    value = std::min (value, maxValue);
+    value = std::max (value, minValue);
+    return value;
+}
+
+//===========================================================
+template class AudioFile<float>;
+template class AudioFile<double>;
diff --git a/AudioFile.h b/AudioFile.h
new file mode 100644
index 0000000..2c5130f
--- /dev/null
+++ b/AudioFile.h
@@ -0,0 +1,181 @@
+//=======================================================================
+/** @file AudioFile.h
+ *  @author Adam Stark
+ *  @copyright Copyright (C) 2017  Adam Stark
+ *
+ * This file is part of the 'AudioFile' library
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+//=======================================================================
+
+#ifndef _AS_AudioFile_h
+#define _AS_AudioFile_h
+
+#include <iostream>
+#include <vector>
+#include <assert.h>
+#include <string>
+
+
+//=============================================================
+/** The different types of audio file, plus some other types to 
+ * indicate a failure to load a file, or that one hasn't been
+ * loaded yet
+ */
enum class AudioFileFormat
{
    Error,      // the file could not be loaded / format not recognised
    NotLoaded,  // no file has been loaded yet
    Wave,       // WAV file
    Aiff        // AIFF file
};
+
+//=============================================================
template <class T>
class AudioFile
{
public:
    
    //=============================================================
    /** A buffer of audio samples: one inner vector per channel,
     * i.e. buffer[channel][sampleIndex].
     */
    typedef std::vector<std::vector<T> > AudioBuffer;
    
    //=============================================================
    /** Constructor */
    AudioFile();
        
    //=============================================================
    /** Loads an audio file from a given file path.
     * @Returns true if the file was successfully loaded
     */
    bool load (std::string filePath);
    
    /** Saves an audio file to a given file path.
     * @Returns true if the file was successfully saved
     */
    bool save (std::string filePath, AudioFileFormat format = AudioFileFormat::Wave);
        
    //=============================================================
    /** @Returns the sample rate */
    uint32_t getSampleRate() const;
    
    /** @Returns the number of audio channels in the buffer */
    int getNumChannels() const;

    /** @Returns true if the audio file is mono */
    bool isMono() const;
    
    /** @Returns true if the audio file is stereo */
    bool isStereo() const;
    
    /** @Returns the bit depth of each sample */
    int getBitDepth() const;
    
    /** @Returns the number of samples per channel */
    int getNumSamplesPerChannel() const;
    
    /** @Returns the length in seconds of the audio file based on the number of samples and sample rate */
    double getLengthInSeconds() const;
    
    /** Prints a summary of the audio file to the console */
    void printSummary() const;
    
    //=============================================================
    
    /** Set the audio buffer for this AudioFile by copying samples from another buffer.
     * @Returns true if the buffer was copied successfully.
     */
    bool setAudioBuffer (AudioBuffer& newBuffer);
    
    /** Sets the audio buffer to a given number of channels and number of samples per channel. This will try to preserve
     * the existing audio, adding zeros to any new channels or new samples in a given channel.
     */
    void setAudioBufferSize (int numChannels, int numSamples);
    
    /** Sets the number of samples per channel in the audio buffer. This will try to preserve
     * the existing audio, adding zeros to new samples in a given channel if the number of samples is increased.
     */
    void setNumSamplesPerChannel (int numSamples);
    
    /** Sets the number of channels. New channels will have the correct number of samples and be initialised to zero */
    void setNumChannels (int numChannels);
    
    /** Sets the bit depth for the audio file. If you use the save() function, this bit depth rate will be used */
    void setBitDepth (int numBitsPerSample);
    
    /** Sets the sample rate for the audio file. If you use the save() function, this sample rate will be used */
    void setSampleRate (uint32_t newSampleRate);
    
    //=============================================================
    /** A vector of vectors holding the audio samples for the AudioFile. You can 
     * access the samples by channel and then by sample index, i.e:
     *
     *      samples[channel][sampleIndex]
     */
    AudioBuffer samples;
    
private:
    
    //=============================================================
    /** Byte order used when reading/writing multi-byte values from raw file data */
    enum class Endianness
    {
        LittleEndian,
        BigEndian
    };
    
    //=============================================================
    // Format detection and per-format decoders (bodies in AudioFile.cpp)
    AudioFileFormat determineAudioFileFormat (std::vector<uint8_t>& fileData);
    bool decodeWaveFile (std::vector<uint8_t>& fileData);
    bool decodeAiffFile (std::vector<uint8_t>& fileData);
    
    //=============================================================
    // Per-format encoders used by save()
    bool saveToWaveFile (std::string filePath);
    bool saveToAiffFile (std::string filePath);
    
    //=============================================================
    void clearAudioBuffer();
    
    //=============================================================
    // Raw-byte helpers: read little/big-endian integers out of file data,
    // or locate a chunk identifier string within it (-1 if absent)
    int32_t fourBytesToInt (std::vector<uint8_t>& source, int startIndex, Endianness endianness = Endianness::LittleEndian);
    int16_t twoBytesToInt (std::vector<uint8_t>& source, int startIndex, Endianness endianness = Endianness::LittleEndian);
    int getIndexOfString (std::vector<uint8_t>& source, std::string s);
    
    //=============================================================
    // 16-bit PCM <-> normalised sample conversion
    T sixteenBitIntToSample (int16_t sample);
    int16_t sampleToSixteenBitInt (T sample);
    
    //=============================================================
    // 8-bit (unsigned, 128-centred) PCM <-> normalised sample conversion
    uint8_t sampleToSingleByte (T sample);
    T singleByteToSample (uint8_t sample);
    
    // AIFF-specific helpers (AIFF stores the sample rate as an 80-bit
    // extended float — presumably handled by these; bodies not shown here)
    uint32_t getAiffSampleRate (std::vector<uint8_t>& fileData, int sampleRateStartIndex);
    bool tenByteMatch (std::vector<uint8_t>& v1, int startIndex1, std::vector<uint8_t>& v2, int startIndex2);
    void addSampleRateToAiffData (std::vector<uint8_t>& fileData, uint32_t sampleRate);
    T clamp (T v1, T minValue, T maxValue);
    
    //=============================================================
    // Serialisation helpers used when building output file data
    void addStringToFileData (std::vector<uint8_t>& fileData, std::string s);
    void addInt32ToFileData (std::vector<uint8_t>& fileData, int32_t i, Endianness endianness = Endianness::LittleEndian);
    void addInt16ToFileData (std::vector<uint8_t>& fileData, int16_t i, Endianness endianness = Endianness::LittleEndian);
    
    //=============================================================
    /** Writes the assembled byte vector to disk. @Returns true on success */
    bool writeDataToFile (std::vector<uint8_t>& fileData, std::string filePath);
    
    //=============================================================
    AudioFileFormat audioFileFormat;  // format of the currently loaded file
    uint32_t sampleRate;              // sample rate in Hz
    int bitDepth;                     // bits per sample (used by save())
};
+
+#endif /* AudioFile_h */
diff --git a/CPU_FDTD.hpp b/CPU_FDTD.hpp
index 156b86d..8e9ff2f 100644
--- a/CPU_FDTD.hpp
+++ b/CPU_FDTD.hpp
@@ -12,6 +12,7 @@
 
 struct CPU_FDTD_Arguments
 {
+	bool isDebug;
 	unsigned int modelWidth;
 	unsigned int modelHeight;
 	float boundaryGain;
@@ -25,6 +26,8 @@ struct CPU_FDTD_Arguments
 class CPU_FDTD_Serial : public DSP
 {
 private:
+	bool isDebug_;
+
 	//Model Dimensions//
 	typedef float base_type_;
 	const int modelWidth_;
@@ -45,7 +48,7 @@ private:
 	Buffer<base_type_> excitation_;
 
 public:
-	CPU_FDTD_Serial(int aModelWidth, int aModelHeight, float aBoundaryGain, const int aBufferSize, property_type_ aPropagationFactor, property_type_ aDampingCoefficient, unsigned int listenerPosition[2], unsigned int excitationPosition[2]) :
+	CPU_FDTD_Serial(int aModelWidth, int aModelHeight, float aBoundaryGain, const int aBufferSize, property_type_ aPropagationFactor, property_type_ aDampingCoefficient, unsigned int listenerPosition[2], unsigned int excitationPosition[2], bool aIsDebug) :
 		modelWidth_(aModelWidth),
 		modelHeight_(aModelHeight),
 		gridElements_(modelWidth_*modelHeight_),
@@ -54,7 +57,8 @@ public:
 		dampingCoefficient_(aDampingCoefficient),
 		model_(aModelWidth, aModelHeight, aBoundaryGain),
 		output_(aBufferSize),
-		excitation_(aBufferSize)
+		excitation_(aBufferSize),
+		isDebug_(aIsDebug)
 	{
 		listenerPosition_[0] = listenerPosition[0];
 		listenerPosition_[1] = listenerPosition[1];
@@ -71,7 +75,8 @@ public:
 		dampingCoefficient_(args.dampingCoefficient),
 		model_(args.modelWidth, args.modelHeight, args.boundaryGain),
 		output_(args.bufferSize),
-		excitation_(args.bufferSize)
+		excitation_(args.bufferSize),
+		isDebug_(args.isDebug)
 	{
 		listenerPosition_[0] = args.listenerPosition[0];
 		listenerPosition_[1] = args.listenerPosition[1];
@@ -91,9 +96,9 @@ public:
 		const base_type_ oneOverMuPlus = 1.0f / (dampingCoefficient_ + 1.0f);
 		for (int i = 0; i != frames; ++i)
 		{
-			for (int x = 0; x != modelWidth_; x++)
+			for (int y = 0; y != modelHeight_; y++)
 			{
-				for (int y = 0; y != modelHeight_; y++)
+				for (int x = 0; x != modelWidth_; x++)
 				{
 					//CellType cellType = n->cellType(x, y);
 					base_type_ centrePressureNMO = model_.nMinusOneGrid()->valueAt(x, y);
diff --git a/Kernels/fdtdLocal.cl b/Kernels/fdtdLocal.cl
index b5f9070..e03bcdc 100644
--- a/Kernels/fdtdLocal.cl
+++ b/Kernels/fdtdLocal.cl
@@ -36,7 +36,7 @@ void ftdtCompute(__global float* gridOne, __global float* gridTwo, __global floa
 	}
 	
 	lGrid[ixyLocal] = n[ixy];
-	//barrier(CLK_LOCAL_MEM_FENCE);
+	barrier(CLK_LOCAL_MEM_FENCE);
 	
 	//Initalise pressure values//
 	float centrePressureNMO = nMOne[ixy];
diff --git a/OpenCL_FDTD.hpp b/OpenCL_FDTD.hpp
index 9fd8485..1cf7f0c 100644
--- a/OpenCL_FDTD.hpp
+++ b/OpenCL_FDTD.hpp
@@ -16,6 +16,8 @@
 
 struct OpenCL_FDTD_Arguments
 {
+	bool isDebug;
+	bool isBenchmark;
 	unsigned int modelWidth;
 	unsigned int modelHeight;
 	float boundaryGain;
@@ -24,13 +26,26 @@ struct OpenCL_FDTD_Arguments
 	float dampingCoefficient;
 	unsigned int listenerPosition[2];
 	unsigned int excitationPosition[2];
-	unsigned int workGroupDimensions[2]; //?
+	unsigned int workGroupDimensions[2];
 	std::string kernelSource;
 };
 
 class OpenCL_FDTD : public DSP
 {
 private:
+	//Print Debug Information//
+	bool isDebug_;
+
+	//Calculate + Print Benchmarking//
+	bool isBenchmark_;
+	cl::Event kernelBenchmark_;
+	cl_ulong kernelComputeStartTime_;
+	cl_ulong kernelComputeEndTime_;
+	cl_ulong kernelComputeElapsedTime_;
+	cl_ulong kernelOverheadStartTime_;
+	cl_ulong kernelOverheadEndTime_;
+	cl_ulong kernelOverheadElapsedTime_;
+
 	//Model Dimensions//
 	typedef float base_type_;
 	const int modelWidth_;
@@ -73,7 +88,7 @@ private:
 	cl::Buffer excitationBuffer;
 	cl::Buffer localBuffer;
 
-	int bufferRotationIndex_ = 0;
+	int bufferRotationIndex_;
 
 	const std::string path_;
 
@@ -88,9 +103,10 @@ public:
 		model_(0, 0, 0.0),
 		output_(0),
 		excitation_(0),
-		bufferSize_(0)
+		bufferSize_(0),
+		bufferRotationIndex_(0)
 	{}
-	OpenCL_FDTD(int aModelWidth, int aModelHeight, float aBoundaryGain, const int aBufferSize, property_type_ aPropagationFactor, property_type_ aDampingCoefficient, unsigned int listenerPosition[2], unsigned int excitationPosition[2], unsigned int workGroupDimensions[2], const char* aKernelSource) :
+	OpenCL_FDTD(int aModelWidth, int aModelHeight, float aBoundaryGain, const int aBufferSize, property_type_ aPropagationFactor, property_type_ aDampingCoefficient, unsigned int listenerPosition[2], unsigned int excitationPosition[2], unsigned int workGroupDimensions[2], const char* aKernelSource, bool aIsDebug, bool aIsBenchmark) :
 		modelWidth_(aModelWidth),
 		modelHeight_(aModelHeight),
 		gridElements_(modelWidth_*modelHeight_),
@@ -101,7 +117,10 @@ public:
 		bufferSize_(aBufferSize),
 		output_(bufferSize_),
 		excitation_(bufferSize_),
-		path_(aKernelSource)
+		path_(aKernelSource),
+		isDebug_(aIsDebug),
+		isBenchmark_(aIsBenchmark),
+		bufferRotationIndex_(0)
 	{
 		listenerPosition_[0] = listenerPosition[0];
 		listenerPosition_[1] = listenerPosition[1];
@@ -111,6 +130,8 @@ public:
 		globalWorkSpaceY_ = modelHeight_;
 		localWorkSpaceX_ = workGroupDimensions[0];
 		localWorkSpaceY_ = workGroupDimensions[1];
+		kernelComputeElapsedTime_ = 0;
+		kernelOverheadStartTime_ = 0;
 		init();
 	}
 	OpenCL_FDTD(OpenCL_FDTD_Arguments args) :
@@ -123,7 +144,10 @@ public:
 		model_(modelWidth_, modelHeight_, args.boundaryGain),
 		output_(args.bufferSize),
 		excitation_(args.bufferSize),
-		path_(args.kernelSource)
+		path_(args.kernelSource),
+		isDebug_(args.isDebug),
+		isBenchmark_(args.isBenchmark),
+		bufferRotationIndex_(0)
 	{
 		listenerPosition_[0] = args.listenerPosition[0];
 		listenerPosition_[1] = args.listenerPosition[1];
@@ -136,6 +160,12 @@ public:
 		//Set size of NDRange and workgroups//
 		globalws_ = cl::NDRange(modelWidth_, modelHeight_);
 		localws_ = cl::NDRange(localWorkSpaceX_, localWorkSpaceY_);
+		kernelComputeElapsedTime_ = 0;
+		kernelOverheadStartTime_ = 0;
+
+		if(isDebug_)
+			printAvailableDevices();
+
 		init();
 	}
 	~OpenCL_FDTD()
@@ -187,8 +217,6 @@ public:
 		//Get device list from context//
 		std::vector<cl::Device> devices = context.getInfo<CL_CONTEXT_DEVICES>();
 
-		std::cout << "WAVEFRONT: " << devices[0].getInfo<CL_DEVICE_WAVEFRONT_WIDTH_AMD>() << std::endl;
-
 		//Create command queue for first device - Profiling enabled//
 		commandQueue = cl::CommandQueue(context, devices[0], CL_QUEUE_PROFILING_ENABLE, &errorStatus);	//Need to specify device 1[0] of platform 3[2] for dedicated graphics - Harri Laptop.
 		if (errorStatus)
@@ -239,8 +267,6 @@ public:
 			std::cout << "ERROR creating kernel. Status code: " << errorStatus << std::endl;
 
 		//Set static kernel arguments//
-
-		//Set kernel arguments//
 		ftdtKernel.setArg(0, sizeof(cl_mem), &nMinusOnePressureBuffer);
 		ftdtKernel.setArg(1, sizeof(cl_mem), &nPressureBuffer);
 		ftdtKernel.setArg(2, sizeof(cl_mem), &nPlusOnePressureBuffer);
@@ -250,12 +276,17 @@ public:
 		unsigned int localWorkspaceSize = localWorkSpaceX_ * localWorkSpaceY_ * sizeof(float);
 		ftdtKernel.setArg(12, localWorkspaceSize, NULL);	//To allocate local memory dynamically, must be given a size here.
 	}
-	float step(cl::Event* kernelBenchmark)
+	float step()
 	{
-		commandQueue.enqueueNDRangeKernel(ftdtKernel, cl::NullRange/*globaloffset*/, globalws_, localws_, NULL, kernelBenchmark);
-		//commandQueue.finish();
-		//kernelBenchmark.wait();
-		//elapsedTime += kernelBenchmark.getProfilingInfo<CL_PROFILING_COMMAND_END>() - kernelBenchmark.getProfilingInfo<CL_PROFILING_COMMAND_START>();
+		commandQueue.enqueueNDRangeKernel(ftdtKernel, cl::NullRange/*globaloffset*/, globalws_, localws_, NULL, &kernelBenchmark_);
+
+		//Record benchmark time//
+		if (isBenchmark_)
+		{
+			kernelBenchmark_.wait();
+			kernelComputeElapsedTime_ += kernelBenchmark_.getProfilingInfo<CL_PROFILING_COMMAND_END>() - kernelBenchmark_.getProfilingInfo<CL_PROFILING_COMMAND_START>();
+			kernelOverheadElapsedTime_ += kernelBenchmark_.getProfilingInfo<CL_PROFILING_COMMAND_SUBMIT>() - kernelBenchmark_.getProfilingInfo<CL_PROFILING_COMMAND_QUEUED>();
+		}
 
 		output_.bufferIndex_++;
 		excitation_.bufferIndex_++;
@@ -265,9 +296,6 @@ public:
 	}
 	bool compute(unsigned long frames, float* inbuf, float* outbuf)
 	{
-		cl::Event kernelBenchmark;
-		cl_ulong elapsedTime = 0;
-
 		//Set dynamic kernel arguments//
 		int listenerPositionArg = model_.getListenerPosition();
 		int excitationPositionArg = model_.getExcitationPosition();
@@ -277,7 +305,6 @@ public:
 		commandQueue.enqueueWriteBuffer(excitationBuffer, CL_TRUE, 0, excitation_.bufferSize_, inbuf);
 		ftdtKernel.setArg(6, sizeof(cl_mem), &excitationBuffer);
 
-		//int bufferRotationnGridIndex = 0; //-1?
 		//Calculate buffer size of synthesizer output samples//
 		for (unsigned int i = 0; i != frames; ++i)
 		{
@@ -285,19 +312,8 @@ public:
 			ftdtKernel.setArg(4, sizeof(int), &output_.bufferIndex_);
 			ftdtKernel.setArg(11, sizeof(int), &bufferRotationIndex_);
 
-			step(&kernelBenchmark);
+			step();
 		}
-		//printf("Elapsed time is: %0.3f\n", (double)elapsed / 1000000.0);
-		cl_ulong time_start;
-		cl_ulong time_end;
-
-		//time_start = kernelBenchmarkStarts[0].getProfilingInfo<CL_PROFILING_COMMAND_START>();
-		//time_end = kernelBenchmarkEnds[0].getProfilingInfo<CL_PROFILING_COMMAND_END>();
-		//kernelBenchmarkEnds[0].wait();
-
-		//double nanoSeconds = time_end - time_start;
-		//printf("OpenCl Execution time is: %0.3f milliseconds \n", nanoSeconds / 1000000.0);
-		//printf("OpenCl Execution time is: %0.3f nanoseconds \n", nanoSeconds);
 
 		output_.resetIndex();
 		excitation_.resetIndex();
@@ -306,10 +322,6 @@ public:
 		for (int k = 0; k != frames; ++k)
 			outbuf[k] = output_[k];
 
-		//std::cout << "OpenCL kernel total execution time: " << (double)elapsedTime / 1000000000.0 << "seconds" << std::endl;
-		//std::cout << "OpenCL kernel total execution time: " << (double)elapsedTime / 1000000.0 << "milliseconds" << std::endl;
-		//std::cout << "OpenCL kernel total execution time: " << (double)elapsedTime / 1000.0 << "microseconds" << std::endl;
-
 		return true;
 	}
 	void updateDynamicVariables(property_type_ aPropagationFactor, property_type_ aDampingFactor, unsigned int aListenerPosition, unsigned int aExcitationPosition)
@@ -321,6 +333,24 @@ public:
 		ftdtKernel.setArg(10, sizeof(float), &aDampingFactor);
 	}
 
+	void printKernelBenchmarking()
+	{
+		if (isBenchmark_)
+		{
+			std::cout << "OpenCL kernel total compute execution time: " << (double)kernelComputeElapsedTime_ / 1000000000.0 << "seconds" << std::endl;
+			std::cout << "OpenCL kernel total compute execution time: " << (double)kernelComputeElapsedTime_ / 1000000.0 << "milliseconds" << std::endl;
+			std::cout << "OpenCL kernel total compute execution time: " << (double)kernelComputeElapsedTime_ / 1000.0 << "microseconds" << std::endl;
+
+			std::cout << std::endl;
+
+			std::cout << "OpenCL kernel total overhead execution time: " << (double)kernelOverheadElapsedTime_ / 1000000000.0 << "seconds" << std::endl;
+			std::cout << "OpenCL kernel total overhead execution time: " << (double)kernelOverheadElapsedTime_ / 1000000.0 << "milliseconds" << std::endl;
+			std::cout << "OpenCL kernel total overhead execution time: " << (double)kernelOverheadElapsedTime_ / 1000.0 << "microseconds" << std::endl;
+
+			std::cout << std::endl;
+		}
+	}
+
 	void printAvailableDevices()
 	{
 		cl::vector<cl::Platform> platforms;
@@ -356,17 +386,18 @@ public:
 				std::cout << "\t\tDevice Max Allocateable Memory: " << device.getInfo<CL_DEVICE_MAX_MEM_ALLOC_SIZE>() << std::endl;
 				std::cout << "\t\tDevice Local Memory: " << device.getInfo<CL_DEVICE_LOCAL_MEM_SIZE>() << std::endl;
 				std::cout << "\t\tDevice Available: " << device.getInfo< CL_DEVICE_AVAILABLE>() << std::endl;
+
+				//If an AMD platform//
+				if (strstr(platform.getInfo<CL_PLATFORM_NAME>().c_str(), "AMD"))
+				{
+					std::cout << "\tAMD Specific:" << std::endl;
+					std::cout << "\t\tAMD Wavefront size: " << device.getInfo<CL_DEVICE_WAVEFRONT_WIDTH_AMD>() << std::endl;
+				}
 			}
 			std::cout << std::endl;
 		}
 	}
 
-	void printDeviceInfo()
-	{
-		//device
-		//CL_DEVICE_WAVEFRONT_WIDTH_AMD
-	}
-
 	//Virtual Functions Defined//
 	bool fillBuffer(unsigned long frames, void* inbuf, void* outbuf) override
 	{
diff --git a/OpenGL_FDTD.hpp b/OpenGL_FDTD.hpp
index de7b1f1..c773e5d 100644
--- a/OpenGL_FDTD.hpp
+++ b/OpenGL_FDTD.hpp
@@ -19,6 +19,8 @@ void mouseButtonCallback(GLFWwindow* window, int button, int action, int mods);
 
 struct OpenGL_FDTD_Arguments
 {
+	bool isDebug;
+	bool isBenchmark;
 	unsigned int modelWidth;
 	unsigned int modelHeight;
 	float boundaryGain;
@@ -37,6 +39,9 @@ struct OpenGL_FDTD_Arguments
 class OpenGL_FDTD : public DSP
 {
 private:
+	bool isBenchmark_;
+	bool isDebug_;
+
 	//Model Dimensions//
 	typedef float base_type_;
 	const int modelWidth_;
@@ -74,7 +79,7 @@ private:
 	const unsigned int quadIndex0 = 0;			//The first simulation model grid - Alternates between timestep n & n-1.
 	const unsigned int quadIndex1 = 1;			//The second simulation model grid - Alternates switches between timestep n-1 & n.
 	const unsigned int quadIndex2 = 2;			//The audio buffer - Single fragment strip acting as a buffer for recording samples from listener point. 
-	const unsigned int renderMagnifier = 10;	//The factor which the model is scaled by when rendering the texture to screen.
+	const unsigned int renderMagnifier = 3;	//The factor which the model is scaled by when rendering the texture to screen.
 	const unsigned int ceilingHeight_ = 2;
 	static const unsigned int numOfAttributesPerVertex_ = 12;						//The number of pieces of information each vertex contains.
 	static const unsigned int numOfVerticesPerQuad_ = 4;								//Number of vertices that make up each texture quad.
@@ -137,7 +142,7 @@ public:
 		bufferSize_(0),
 		isVisualize_(false)
 	{}
-	OpenGL_FDTD(int aModelWidth, int aModelHeight, float aBoundaryGain, const int aBufferSize, property_type_ aPropagationFactor, property_type_ aDampingCoefficient, unsigned int listenerPosition[2], unsigned int excitationPosition[2], const char* aFboVsSource, const char* aFboFsSource, const char* aRenderVsSource, const char* aRenderFsSource, bool aIsVisualize) :
+	OpenGL_FDTD(int aModelWidth, int aModelHeight, float aBoundaryGain, const int aBufferSize, property_type_ aPropagationFactor, property_type_ aDampingCoefficient, unsigned int listenerPosition[2], unsigned int excitationPosition[2], const char* aFboVsSource, const char* aFboFsSource, const char* aRenderVsSource, const char* aRenderFsSource, bool aIsVisualize, bool aIsDebug, bool aIsBenchmark) :
 		modelWidth_(aModelWidth),
 		modelHeight_(aModelHeight),
 		gridElements_(modelWidth_*modelHeight_),
@@ -149,7 +154,9 @@ public:
 		excitation_(aBufferSize),
 		bufferSize_(aBufferSize),
 		paths_({ aFboVsSource, aFboFsSource, aRenderVsSource, aRenderFsSource }),
-		isVisualize_(aIsVisualize)
+		isVisualize_(aIsVisualize),
+		isDebug_(aIsDebug),
+		isBenchmark_(aIsBenchmark)
 	{
 		listenerPosition_[0] = listenerPosition[0];
 		listenerPosition_[1] = listenerPosition[1];
@@ -173,7 +180,9 @@ public:
 		excitation_(args.bufferSize),
 		bufferSize_(args.bufferSize),
 		paths_({ args.fboVsSource, args.fboFsSource, args.renderVsSource, args.renderFsSource }),
-		isVisualize_(args.isVisualize)
+		isVisualize_(args.isVisualize),
+		isDebug_(args.isDebug),
+		isBenchmark_(args.isBenchmark)
 	{
 		listenerPosition_[0] = args.listenerPosition[0];
 		listenerPosition_[1] = args.listenerPosition[1];
@@ -197,7 +206,7 @@ public:
 		glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
 
 		//Create GLFW window//
-		window_ = glfwCreateWindow(modelWidth_ * renderMagnifier, modelHeight_ * renderMagnifier, "LearnOpenGL", NULL, NULL);
+		window_ = glfwCreateWindow(modelWidth_ * renderMagnifier, modelHeight_ * renderMagnifier, "IWOCL FDTD OpenGL", NULL, NULL);
 		if (window_ == NULL)
 		{
 			std::cout << "Failed to create GLFW window" << std::endl;
@@ -478,7 +487,7 @@ public:
 		stateLocation_ = glGetUniformLocation(fboShaderProgram_, "currentState");
 
 		float excitationFragCoord[2];
-		excitationFragCoord[0] = (float)(excitationPosition_[0] + 0.5) / (float)textureWidth_;
+		excitationFragCoord[0] = (float)(excitationPosition_[0] + 0.5 + modelWidth_) / (float)textureWidth_;
 		excitationFragCoord[1] = (float)(excitationPosition_[1] + 0.5) / (float)textureHeight_;
 
 		//Value of excitation point - Active or not. This could be done differently? Just need an identified excitation point.//
@@ -511,6 +520,8 @@ public:
 		//Listener fragment coordinates as uniforms - Only need one quad for rendering//
 		GLuint listenerFragCoordLocationRender = glGetUniformLocation(renderShaderProgram_, "listenerFragCoord");
 		glUniform2f(listenerFragCoordLocationRender, listenerFragCoord[0][0], listenerFragCoord[0][1]);
+		GLuint excitationFragCoordLocationRender = glGetUniformLocation(renderShaderProgram_, "excitationFragCoord");
+		glUniform2f(excitationRenderPositionLocation_, excitationFragCoord[0], excitationFragCoord[1]);
 
 		//Set inputTexture as same texture at index 0, just for reading from//
 		glUniform1i(glGetUniformLocation(renderShaderProgram_, "inputTexture"), 0);
@@ -562,6 +573,7 @@ public:
 			//////////////////////
 
 			//Pass next excitation Value//
+			//glUniform2f(excitationRenderPositionLocation_, excitationFragCoord[0], excitationFragCoord[1]);
 			glUniform1f(excitationMagnitudeLocation_, excitationMagnitude_);
 			//glUniform2f(excitationPositionLocation_, excitationPosition_[0], excitationPosition_[1]);
 			//glUniform2f(excitationRenderPositionLocation_, excitationPosition_[0], excitationPosition_[1]);
@@ -671,6 +683,11 @@ public:
 		return true;
 	}
 
+	void printKernelBenchmarking()
+	{
+		
+	}
+
 	//Virtual Functions Defined//
 	bool fillBuffer(unsigned long frames, void* inbuf, void* outbuf) override
 	{
diff --git a/Shaders/render_fs.glsl b/Shaders/render_fs.glsl
index eb6031b..a0213b3 100644
--- a/Shaders/render_fs.glsl
+++ b/Shaders/render_fs.glsl
@@ -68,7 +68,7 @@ void main () {
 		float p = frag_color.r;
 		vec4 color = get_color_from_pressure(p);
 		
-		frag_color = mix(color, color_yellow, 0.5); // a bit transparent, to see propagation
+		frag_color = mix(color, color_yellow, 1.0); // a bit transparent, to see propagation
 	}
 	
 	
@@ -76,11 +76,11 @@ void main () {
 	vec2 posDiff = vec2(tex_c.x - listenerFragCoord.x, tex_c.y - listenerFragCoord.y);
 	vec2 absDiff = vec2(abs(posDiff.x), abs(posDiff.y));
 	if(absDiff.x<deltaCoord.x/2 && absDiff.y<deltaCoord.y/2)
-		frag_color = mix(frag_color, color_pink, 0.5); // a bit transparent, to see what's underneath
+		frag_color = mix(frag_color, color_pink, 1.0); // a bit transparent, to see what's underneath
 		
 	//Excitation Colour//
 	posDiff = vec2(tex_c.x - excitationFragCoord.x, tex_c.y - excitationFragCoord.y);
 	absDiff = vec2(abs(posDiff.x), abs(posDiff.y));
 	if(absDiff.x<deltaCoord.x && absDiff.y<deltaCoord.y)
-		frag_color = mix(frag_color, color_yellow, 0.5); // a bit transparent, to see what's underneath
+		frag_color = mix(frag_color, color_yellow, 1.0); // a bit transparent, to see what's underneath
 }
diff --git a/Wavetable Exciter.hpp b/Wavetable Exciter.hpp
index 675cdca..0eae03b 100644
--- a/Wavetable Exciter.hpp	
+++ b/Wavetable Exciter.hpp	
@@ -72,7 +72,7 @@ public:
 	WavetableExciter(const int duration, const float* wavetableBuffer, const size_t wavetableSize) : wavetableSynth(wavetableBuffer, wavetableSize)
 	{
 		excitationNumSamples = duration * SAMPLES_PER_MILLISECOND;
-		wavetableSynth.setFrequency(10440, 44100);
+		wavetableSynth.setFrequency(1440, 44100);
 	}
 	~WavetableExciter() {}
 	float getNextSample()
diff --git a/main.cpp b/main.cpp
index 5d6c21b..ce6d63f 100644
--- a/main.cpp
+++ b/main.cpp
@@ -6,64 +6,17 @@
 #include "OpenCL_FDTD.hpp"
 #include "OpenGL_FDTD.hpp"
 
-#include "tinywav.h"
+//Parsing parameters as json file//
 #include "json.hpp"
 
+//Save samples in wav file//
+#include "AudioFile.h"
+
+//Playback samples at end of computation//
 #include <SFML/Audio.hpp>
-sf::Int16 normalize(float currentValue, float currentMin, float currentMax, float newMin, float newMax)
-{
-	sf::Int16 newValue = (((currentValue - currentMin)*(newMax - newMin)) / (currentMax - currentMin)) + newMin;
-	return newValue;
-}
+sf::Int16 normalize(float currentValue, float currentMin, float currentMax, float newMin, float newMax);
 
-static void show_usage(std::string name)
-{
-	std::cerr << "Usage: " << "IWOCL FDTD Benchmarking"
-		<< "Options:\n"
-		<< "\t-h,--help\t\t\tShow this help message\n"
-		<< "\t-j,--json Use json file(Required)\tSpecify the json file path"
-		<< "\tExample json file: "
-		<< "\t"
-		<< "\n{"
-		<< "\n"
-		<< "\n	\"general\": {"
-		<< "\n		\"sampleRate\": 44100,"
-		<< "\n		\"bufferSize\" : 512,"
-		<< "\n		\"numberBuffers\" : 100,"
-		<< "\n		\"isDebug\" : false,"
-		<< "\n		\"isAudio\" : true,"
-		<< "\n		\"isBenchmarking\" : true,"
-		<< "\n		\"isLog\" : false"
-		<< "\n		},"
-		<< "\n"
-		<< "\n		\"implementation\": {"
-		<< "\n			\"type\": \"OpenL\","
-		<< "\n			\"modelWidth\" : 32,"
-		<< "\n			\"modelHeight\" : 32,"
-		<< "\n			\"propagationFactor\" : 0.35,"
-		<< "\n			\"dampingCoefficient\" : 0.005,"
-		<< "\n			\"boundaryGain\" : 0.02,"
-		<< "\n			\"listenerPosition\" : [8, 8],"
-		<< "\n			\"excitationPosition\" : [16, 16],"
-		<< "\n"
-		<< "\n			\"OpenCL\" : {"
-		<< "\n			\"workGroupDimensions\": [16, 16],"
-		<< "\n				\"kernelSource\" : \"Kernels/fdtdGlobal.cl\""
-		<< "\n			},"
-		<< "\n"
-		<< "\n			\"OpenGL\" : {"
-		<< "\n				\"fboVsSource\": \"Shaders/fbo_vs.glsl\","
-		<< "\n				\"fboFsSource\" : \"Shaders/fbo_fs.glsl\","
-		<< "\n				\"renderVsSource\" : \"Shaders/render_vs.glsl\","
-		<< "\n				\"renderFsSource\" : \"Shaders/render_fs.glsl\","
-		<< "\n				\"isVisualize\" : true"
-		<< "\n			},"
-		<< "\n"
-		<< "\n		}"
-		<< "\n"
-		<< "\n}"
-		<< std::endl;
-}
+static void show_usage(std::string name);
 
 enum implementation { NONE, CPUSerial = 1, CPUSIMD = 2, OpenCL = 3, OpenGL = 4 };
 
@@ -71,19 +24,11 @@ using nlohmann::json;
 
 int main(int argc, char* argv[])
 {
-	bool isDebug = false;
-	bool isLog = false;
-	bool benchmark = false;
-	bool visualize = false;
-	bool isAudio = false;
-	bool isBenchmarking = false;
-
 	//Parse command line arguments//
 	if (argc < 2) {
 		show_usage(argv[0]);
 		return 1;
 	}
-
 	std::string jsonPath;
 	for (int i = 1; i < argc; ++i) {
 		std::string arg = argv[i];
@@ -95,7 +40,7 @@ int main(int argc, char* argv[])
 			jsonPath = argv[i + 1];
 	}
 
-
+	//Read json file into program object//
 	std::ifstream ifs(jsonPath);
 	json j = json::parse(ifs);
 	//std::cout << j << std::endl;
@@ -104,31 +49,26 @@ int main(int argc, char* argv[])
 	implementation implementation = NONE;
 	if (j["implementation"]["type"] == "CPUSerial")
 		implementation = CPUSerial;
-	if (j["implementation"]["type"] == "CPUSIMD")
+	else if (j["implementation"]["type"] == "CPUSIMD")
 		implementation = CPUSIMD;
-	if (j["implementation"]["type"] == "OpenCL")
+	else if (j["implementation"]["type"] == "OpenCL")
 		implementation = OpenCL;
-	if (j["implementation"]["type"] == "OpenGL")
+	else if (j["implementation"]["type"] == "OpenGL")
 		implementation = OpenGL;
-	if (j["general"]["isDebug"] == true)
-		isDebug = true;
-	if (j["general"]["isAudio"] == true)
-		isAudio = true;
-	if (j["general"]["isBenchmarking"] == true)
-		isBenchmarking = true;
-	if (j["general"]["isLog"] == true)
-		isLog = true;
-
-	if (implementation == NONE)
+	else if (implementation == NONE)
 	{
 		show_usage(argv[0]);
 		return 1;
 	}
 
 	//General parameters//
-	const int sampleRate = j["general"]["sampleRate"];
-	const int bufferSize = j["general"]["bufferSize"];
-	const int numBuffers = j["general"]["numberBuffers"];
+	const int sampleRate =			j["general"]["sampleRate"];
+	const int bufferSize =			j["general"]["bufferSize"];
+	const int numBuffers =			j["general"]["numberBuffers"];
+	const bool isDebug =			j["general"]["isDebug"];
+	const bool isLog =				j["general"]["isLog"];
+	const bool isBenchmarking =		j["general"]["isBenchmarking"];
+	const bool isAudio =			j["general"]["isAudio"];
 
 	DSP* simulation;
 	simulation = new OpenCL_FDTD();
@@ -137,15 +77,16 @@ int main(int argc, char* argv[])
 	if (implementation == CPUSerial)
 	{
 		CPU_FDTD_Arguments args;
-		args.modelWidth = j["implementation"]["modelWidth"];
-		args.modelHeight = j["implementation"]["modelHeight"];
-		args.propagationFactor = j["implementation"]["propagationFactor"];
-		args.dampingCoefficient = j["implementation"]["dampingCoefficient"];
-		args.boundaryGain = j["implementation"]["boundaryGain"];
-		args.listenerPosition[0] = j["implementation"]["listenerPosition"][0];
-		args.listenerPosition[1] = j["implementation"]["listenerPosition"][1];
-		args.excitationPosition[0] = j["implementation"]["excitationPosition"][0];
-		args.excitationPosition[1] = j["implementation"]["excitationPosition"][1];
+		args.isDebug = isDebug;
+		args.modelWidth =					j["implementation"]["modelWidth"];
+		args.modelHeight =					j["implementation"]["modelHeight"];
+		args.propagationFactor =			j["implementation"]["propagationFactor"];
+		args.dampingCoefficient =			j["implementation"]["dampingCoefficient"];
+		args.boundaryGain =					j["implementation"]["boundaryGain"];
+		args.listenerPosition[0] =			j["implementation"]["listenerPosition"][0];
+		args.listenerPosition[1] =			j["implementation"]["listenerPosition"][1];
+		args.excitationPosition[0] =		j["implementation"]["excitationPosition"][0];
+		args.excitationPosition[1] =		j["implementation"]["excitationPosition"][1];
 		args.bufferSize = bufferSize;
 		simulation = new CPU_FDTD_Serial(args);
 	}
@@ -154,15 +95,16 @@ int main(int argc, char* argv[])
 	if (implementation == CPUSIMD)
 	{
 		CPU_FDTD_Arguments args;
-		args.modelWidth = j["implementation"]["modelWidth"];
-		args.modelHeight = j["implementation"]["modelHeight"];
-		args.propagationFactor = j["implementation"]["propagationFactor"];
-		args.dampingCoefficient = j["implementation"]["dampingCoefficient"];
-		args.boundaryGain = j["implementation"]["boundaryGain"];
-		args.listenerPosition[0] = j["implementation"]["listenerPosition"][0];
-		args.listenerPosition[1] = j["implementation"]["listenerPosition"][1];
-		args.excitationPosition[0] = j["implementation"]["excitationPosition"][0];
-		args.excitationPosition[1] = j["implementation"]["excitationPosition"][1];
+		args.isDebug = isDebug;
+		args.modelWidth =					j["implementation"]["modelWidth"];
+		args.modelHeight =					j["implementation"]["modelHeight"];
+		args.propagationFactor =			j["implementation"]["propagationFactor"];
+		args.dampingCoefficient =			j["implementation"]["dampingCoefficient"];
+		args.boundaryGain =					j["implementation"]["boundaryGain"];
+		args.listenerPosition[0] =			j["implementation"]["listenerPosition"][0];
+		args.listenerPosition[1] =			j["implementation"]["listenerPosition"][1];
+		args.excitationPosition[0] =		j["implementation"]["excitationPosition"][0];
+		args.excitationPosition[1] =		j["implementation"]["excitationPosition"][1];
 		args.bufferSize = bufferSize;
 		simulation = new CPU_FDTD_SIMD(args);
 	}
@@ -171,19 +113,21 @@ int main(int argc, char* argv[])
 	if (implementation == OpenCL)
 	{
 		OpenCL_FDTD_Arguments args;
-		args.modelWidth = j["implementation"]["modelWidth"];
-		args.modelHeight = j["implementation"]["modelHeight"];
-		args.propagationFactor = j["implementation"]["propagationFactor"];
-		args.dampingCoefficient = j["implementation"]["dampingCoefficient"];
-		args.boundaryGain = j["implementation"]["boundaryGain"];
-		args.listenerPosition[0] = j["implementation"]["listenerPosition"][0];
-		args.listenerPosition[1] = j["implementation"]["listenerPosition"][1];
-		args.excitationPosition[0] = j["implementation"]["excitationPosition"][0];
-		args.excitationPosition[1] = j["implementation"]["excitationPosition"][1];
-		args.workGroupDimensions[0] = j["implementation"]["OpenCL"]["workGroupDimensions"][0];
-		args.workGroupDimensions[1] = j["implementation"]["OpenCL"]["workGroupDimensions"][1];
+		args.isDebug = isDebug;
+		args.isBenchmark =					j["implementation"]["OpenCL"]["isBenchmarking"];
+		args.modelWidth =					j["implementation"]["modelWidth"];
+		args.modelHeight =					j["implementation"]["modelHeight"];
+		args.propagationFactor =			j["implementation"]["propagationFactor"];
+		args.dampingCoefficient =			j["implementation"]["dampingCoefficient"];
+		args.boundaryGain =					j["implementation"]["boundaryGain"];
+		args.listenerPosition[0] =			j["implementation"]["listenerPosition"][0];
+		args.listenerPosition[1] =			j["implementation"]["listenerPosition"][1];
+		args.excitationPosition[0] =		j["implementation"]["excitationPosition"][0];
+		args.excitationPosition[1] =		j["implementation"]["excitationPosition"][1];
+		args.workGroupDimensions[0] =		j["implementation"]["OpenCL"]["workGroupDimensions"][0];
+		args.workGroupDimensions[1] =		j["implementation"]["OpenCL"]["workGroupDimensions"][1];
 		args.bufferSize = bufferSize;
-		args.kernelSource = j["implementation"]["OpenCL"]["kernelSource"].get<std::string>();
+		args.kernelSource =					j["implementation"]["OpenCL"]["kernelSource"].get<std::string>();
 		simulation = new OpenCL_FDTD(args);
 	}
 
@@ -191,27 +135,28 @@ int main(int argc, char* argv[])
 	if (implementation == OpenGL)
 	{
 		OpenGL_FDTD_Arguments args;
-		args.modelWidth = j["implementation"]["modelWidth"];
-		args.modelHeight = j["implementation"]["modelHeight"];
-		args.propagationFactor = j["implementation"]["propagationFactor"];
-		args.dampingCoefficient = j["implementation"]["dampingCoefficient"];
-		args.boundaryGain = j["implementation"]["boundaryGain"];
-		args.listenerPosition[0] = j["implementation"]["listenerPosition"][0];
-		args.listenerPosition[1] = j["implementation"]["listenerPosition"][1];
-		args.excitationPosition[0] = j["implementation"]["excitationPosition"][0];
-		args.excitationPosition[1] = j["implementation"]["excitationPosition"][1];
+		args.isDebug = isDebug;
+		args.isBenchmark = isBenchmarking;
+		args.modelWidth =					j["implementation"]["modelWidth"];
+		args.modelHeight =					j["implementation"]["modelHeight"];
+		args.propagationFactor =			j["implementation"]["propagationFactor"];
+		args.dampingCoefficient =			j["implementation"]["dampingCoefficient"];
+		args.boundaryGain =					j["implementation"]["boundaryGain"];
+		args.listenerPosition[0] =			j["implementation"]["listenerPosition"][0];
+		args.listenerPosition[1] =			j["implementation"]["listenerPosition"][1];
+		args.excitationPosition[0] =		j["implementation"]["excitationPosition"][0];
+		args.excitationPosition[1] =		j["implementation"]["excitationPosition"][1];
 		args.bufferSize = bufferSize;
-		args.fboVsSource = j["implementation"]["OpenGL"]["fboVsSource"].get<std::string>();
-		args.fboFsSource = j["implementation"]["OpenGL"]["fboFsSource"].get<std::string>();
-		args.renderVsSource = j["implementation"]["OpenGL"]["renderVsSource"].get<std::string>();
-		args.renderFsSource = j["implementation"]["OpenGL"]["renderFsSource"].get<std::string>();
-		args.isVisualize = j["implementation"]["OpenGL"]["isVisualize"];
+		args.fboVsSource =					j["implementation"]["OpenGL"]["fboVsSource"].get<std::string>();
+		args.fboFsSource =					j["implementation"]["OpenGL"]["fboFsSource"].get<std::string>();
+		args.renderVsSource =				j["implementation"]["OpenGL"]["renderVsSource"].get<std::string>();
+		args.renderFsSource =				j["implementation"]["OpenGL"]["renderFsSource"].get<std::string>();
+		args.isVisualize =					j["implementation"]["OpenGL"]["isVisualize"];
 		simulation = new OpenGL_FDTD(args);
 	}
 
-	
 	std::vector<float> wave = { 0.057564,0.114937,0.171929,0.228351,0.284015,0.338738,0.392337,0.444635,0.495459,0.544639,0.592013,0.637424,0.680721,0.721760,0.760406,0.796530,0.830012,0.860742,0.888617,0.913546,0.935444,0.954240,0.969872,0.982287,0.991445,0.997315,0.999877,0.999123,0.995055,0.987688,0.977045,0.963162,0.946085,0.925870,0.902585,0.876307,0.847122,0.815128,0.780430,0.743145,0.703395,0.661312,0.617036,0.570714,0.522499,0.472551,0.421036,0.368125,0.313993,0.258820,0.202788,0.146084,0.088895,0.031412,-0.026176,-0.083677,-0.140900,-0.197656,-0.253757,-0.309016,-0.363250,-0.416280,-0.467929,-0.518026,-0.566405,-0.612906,-0.657374,-0.699662,-0.739630,-0.777145,-0.812083,-0.844327,-0.873771,-0.900318,-0.923879,-0.944376,-0.961741,-0.975916,-0.986855,-0.994522,-0.998890,-0.999945,-0.997684,-0.992115,-0.983255,-0.971135,-0.955794,-0.937283,-0.915664,-0.891008,-0.863397,-0.832923,-0.799686,-0.763798,-0.725376,-0.684549,-0.641452,-0.596227,-0.549025,-0.500003,-0.449322,-0.397151,-0.343663,-0.289035,-0.233449,-0.177088,-0.120140,-0.062794,-0.005240 };
-	WavetableExciter wavetableExciter_(5000, &(wave[0]), wave.size());
+	WavetableExciter wavetableExciter_(5, &(wave[0]), wave.size());
 
 	std::vector<sf::Int16> audioBuffer;
 
@@ -220,17 +165,18 @@ int main(int argc, char* argv[])
 	if (isBenchmarking)
 		start = std::chrono::steady_clock::now();
 
-	TinyWav tw;
+	//Create .wav file for output samples//
+	AudioFile<float> audioFile;
+	AudioFile<float>::AudioBuffer buffer;
 	if (isLog)
-		tinywav_open_write(&tw,
-			1,
-			44100,
-			TW_FLOAT32, // the output samples will be 32-bit floats. TW_INT16 is also supported
-			TW_INLINE,  // the samples will be presented inlined in a single buffer.
-						// Other options include TW_INTERLEAVED and TW_SPLIT
-			"output.wav" // the output path
-		);
+	{
+		buffer.resize(1);
+		buffer[0].resize(numBuffers*bufferSize);
+		audioFile.setBitDepth(24);
+		audioFile.setSampleRate(44100);
+	}
 
+	//Compute samples from FDTD implementation chosen//
 	for (int i = 0; i != numBuffers; ++i)
 	{
 		float* inputExcitation = new float[bufferSize];
@@ -254,21 +200,33 @@ int main(int argc, char* argv[])
 				audioBuffer.push_back(normalize((float)outputSamples[k], -1.0, 1.0, -32768, 32768));
 		}
 
-		if(isLog)
-			tinywav_write_f(&tw, (float*)outputSamples, bufferSize);
+		if (isLog)
+		{
+			for (int k = 0; k != bufferSize; ++k)
+				buffer[0][i*bufferSize + k] = (float)outputSamples[k];
+		}
+	}
+	if (isLog)
+	{
+		audioFile.setAudioBuffer(buffer);
+		audioFile.save("audioFile.wav");
 	}
-	if(isLog)
-		tinywav_close_write(&tw);
 
 	//End total compute time//
 	if (isBenchmarking)
 	{
+		//Print OpenCL Kernel compute time//
+		if (implementation == OpenCL)
+			((OpenCL_FDTD*)simulation)->printKernelBenchmarking();
+		if(implementation == OpenGL)
+			((OpenGL_FDTD*)simulation)->printKernelBenchmarking();
+
+		//Print total compute time//
 		auto end = std::chrono::steady_clock::now();
 		auto diff = end - start;
-		std::cout << "Time to complete: " << std::chrono::duration<double>(diff).count() << "s" << std::endl;
-		std::cout << "Time to complete: " << std::chrono::duration <double, std::centi>(diff).count() << "cs" << std::endl;
-		std::cout << "Time to complete: " << std::chrono::duration <double, std::milli>(diff).count() << "ms" << std::endl;
-		std::cout << "Time to complete: " << std::chrono::duration <double, std::nano>(diff).count() << "ns" << std::endl;
+		std::cout << "Total time to complete: " << std::chrono::duration<double>(diff).count() << "s" << std::endl;
+		std::cout << "Total time to complete: " << std::chrono::duration <double, std::milli>(diff).count() << "ms" << std::endl;
+		std::cout << "Total time to complete: " << std::chrono::duration <double, std::nano>(diff).count() << "ns" << std::endl;
 	}
 
 	sf::SoundBuffer soundBuffer;
@@ -287,4 +245,59 @@ int main(int argc, char* argv[])
 	getchar();
 
 	return 0;
+}
+
+sf::Int16 normalize(float currentValue, float currentMin, float currentMax, float newMin, float newMax)
+{
+	sf::Int16 newValue = (((currentValue - currentMin)*(newMax - newMin)) / (currentMax - currentMin)) + newMin;
+	return newValue;
+}
+
+static void show_usage(std::string name)
+{
+	std::cerr << "Usage: " << "IWOCL FDTD Benchmarking\n"
+		<< "Options:\n"
+		<< "\t-h,--help\t\t\tShow this help message\n"
+		<< "\t-j,--json Use json file (Required)\tSpecify the json file path\n"
+		<< "\tExample json file: "
+		<< "\t"
+		<< "\n{"
+		<< "\n"
+		<< "\n	\"general\": {"
+		<< "\n		\"sampleRate\": 44100,"
+		<< "\n		\"bufferSize\" : 512,"
+		<< "\n		\"numberBuffers\" : 100,"
+		<< "\n		\"isDebug\" : false,"
+		<< "\n		\"isAudio\" : true,"
+		<< "\n		\"isBenchmarking\" : true,"
+		<< "\n		\"isLog\" : false"
+		<< "\n		},"
+		<< "\n"
+		<< "\n		\"implementation\": {"
+		<< "\n			\"type\": \"OpenCL\","
+		<< "\n			\"modelWidth\" : 32,"
+		<< "\n			\"modelHeight\" : 32,"
+		<< "\n			\"propagationFactor\" : 0.35,"
+		<< "\n			\"dampingCoefficient\" : 0.005,"
+		<< "\n			\"boundaryGain\" : 0.02,"
+		<< "\n			\"listenerPosition\" : [8, 8],"
+		<< "\n			\"excitationPosition\" : [16, 16],"
+		<< "\n"
+		<< "\n			\"OpenCL\" : {"
+		<< "\n			\"workGroupDimensions\": [16, 16],"
+		<< "\n				\"kernelSource\" : \"Kernels/fdtdGlobal.cl\""
+		<< "\n			},"
+		<< "\n"
+		<< "\n			\"OpenGL\" : {"
+		<< "\n				\"fboVsSource\": \"Shaders/fbo_vs.glsl\","
+		<< "\n				\"fboFsSource\" : \"Shaders/fbo_fs.glsl\","
+		<< "\n				\"renderVsSource\" : \"Shaders/render_vs.glsl\","
+		<< "\n				\"renderFsSource\" : \"Shaders/render_fs.glsl\","
+		<< "\n				\"isVisualize\" : true"
+		<< "\n			},"
+		<< "\n"
+		<< "\n		}"
+		<< "\n"
+		<< "\n}"
+		<< std::endl;
 }
\ No newline at end of file
diff --git a/parameters.json b/parameters.json
index 314fe26..b35ae8d 100644
--- a/parameters.json
+++ b/parameters.json
@@ -2,7 +2,7 @@
 
 "general": {
 "sampleRate": 44100,
-"bufferSize": 512,
+"bufferSize": 128,
 "numberBuffers": 100,
 "isDebug": false,
 "isAudio": true,
@@ -11,17 +11,18 @@
 },
 
 "implementation": {
-  "type": "OpenCL",
-  "modelWidth": 256,
-  "modelHeight": 256,
-  "propagationFactor": 0.45,
-  "dampingCoefficient": 0.00005,
+  "type": "CPUSerial",
+  "modelWidth": 8,
+  "modelHeight": 8,
+  "propagationFactor": 0.252,
+  "dampingCoefficient": 0.0012,
   "boundaryGain": 0.02,
-  "listenerPosition": [9, 9],
-  "excitationPosition": [141, 141],
+  "listenerPosition": [3, 3],
+  "excitationPosition": [6, 6],
   
   "OpenCL": {
-	"workGroupDimensions": [8, 8],
+	"isBenchmarking": false,
+	"workGroupDimensions": [16, 16],
 	"kernelSource": "Kernels/fdtdLocal.cl"
   },
   
@@ -31,14 +32,6 @@
 	"renderVsSource": "Shaders/render_vs.glsl",
 	"renderFsSource": "Shaders/render_fs.glsl",
 	"isVisualize": true
-  },
-  
-  "popup": {
-    "menuitem": [
-      {"value": "New", "onclick": "CreateNewDoc()"},
-      {"value": "Open", "onclick": "OpenDoc()"},
-      {"value": "Close", "onclick": "CloseDoc()"}
-    ]
   }
 }
 
diff --git a/tinywav.c b/tinywav.c
deleted file mode 100644
index 6e48203..0000000
--- a/tinywav.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/**
- * Copyright (c) 2015-2017, Martin Roth (mhroth@gmail.com)
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-
-
-#include <assert.h>
-#include <string.h>
-#if _WIN32
-#include <winsock2.h>
-#include <malloc.h>
-#else
-#include <alloca.h>
-#include <netinet/in.h>
-#endif
-#include "tinywav.h"
-
-int tinywav_open_write(TinyWav *tw,
-	int16_t numChannels, int32_t samplerate,
-	TinyWavSampleFormat sampFmt, TinyWavChannelFormat chanFmt,
-	const char *path) {
-#if _WIN32
-	errno_t err = fopen_s(&tw->f, path, "w");
-	assert(err == 0);
-#else
-	tw->f = fopen(path, "w");
-#endif
-	assert(tw->f != NULL);
-	tw->numChannels = numChannels;
-	tw->totalFramesWritten = 0;
-	tw->sampFmt = sampFmt;
-	tw->chanFmt = chanFmt;
-
-	// prepare WAV header
-	TinyWavHeader h;
-	h.ChunkID = htonl(0x52494646); // "RIFF"
-	h.ChunkSize = 0; // fill this in on file-close
-	h.Format = htonl(0x57415645); // "WAVE"
-	h.Subchunk1ID = htonl(0x666d7420); // "fmt "
-	h.Subchunk1Size = 16; // PCM
-	h.AudioFormat = (tw->sampFmt - 1); // 1 PCM, 3 IEEE float
-	h.NumChannels = numChannels;
-	h.SampleRate = samplerate;
-	h.ByteRate = samplerate * numChannels * tw->sampFmt;
-	h.BlockAlign = numChannels * tw->sampFmt;
-	h.BitsPerSample = 8 * tw->sampFmt;
-	h.Subchunk2ID = htonl(0x64617461); // "data"
-	h.Subchunk2Size = 0; // fill this in on file-close
-
-	// write WAV header
-	fwrite(&h, sizeof(TinyWavHeader), 1, tw->f);
-
-	return 0;
-}
-
-int tinywav_open_read(TinyWav *tw, const char *path, TinyWavChannelFormat chanFmt, TinyWavSampleFormat sampFmt) {
-	tw->f = fopen(path, "rb");
-	assert(tw->f != NULL);
-
-	size_t ret = fread(&tw->h, sizeof(TinyWavHeader), 1, tw->f);
-	assert(ret > 0);
-	assert(tw->h.ChunkID == htonl(0x52494646));        // "RIFF"
-	assert(tw->h.Format == htonl(0x57415645));         // "WAVE"
-	assert(tw->h.Subchunk1ID == htonl(0x666d7420));    // "fmt "
-
-	// skip over any other chunks before the "data" chunk
-	while (tw->h.Subchunk2ID != htonl(0x64617461)) {   // "data"
-		fseek(tw->f, 4, SEEK_CUR);
-		fread(&tw->h.Subchunk2ID, 4, 1, tw->f);
-	}
-	assert(tw->h.Subchunk2ID == htonl(0x64617461));    // "data"
-	fread(&tw->h.Subchunk2Size, 4, 1, tw->f);
-
-	tw->numChannels = tw->h.NumChannels;
-	tw->chanFmt = chanFmt;
-	tw->sampFmt = sampFmt;
-
-	tw->totalFramesWritten = tw->h.Subchunk2Size / (tw->numChannels * tw->sampFmt);
-	return 0;
-}
-
-int tinywav_read_f(TinyWav *tw, void *data, int len) { // returns number of frames read
-	switch (tw->sampFmt) {
-	case TW_INT16: { //TODO(gio): implement TW_INT16 conversion
-		int16_t *z = (int16_t *)alloca(tw->numChannels*len * sizeof(int16_t));
-		switch (tw->chanFmt) {
-		case TW_INTERLEAVED: {
-			const float *const x = (const float *const)data;
-			for (int i = 0; i < tw->numChannels*len; ++i) {
-				z[i] = (int16_t)(x[i] * 32767.0f);
-			}
-			break;
-		}
-		case TW_INLINE: {
-			const float *const x = (const float *const)data;
-			for (int i = 0, k = 0; i < len; ++i) {
-				for (int j = 0; j < tw->numChannels; ++j) {
-					z[k++] = (int16_t)(x[j*len + i] * 32767.0f);
-				}
-			}
-			break;
-		}
-		case TW_SPLIT: {
-			const float **const x = (const float **const)data;
-			for (int i = 0, k = 0; i < len; ++i) {
-				for (int j = 0; j < tw->numChannels; ++j) {
-					z[k++] = (int16_t)(x[j][i] * 32767.0f);
-				}
-			}
-			break;
-		}
-		default: return 0;
-		}
-
-		tw->totalFramesWritten += len;
-		return (int)fwrite(z, sizeof(int16_t), tw->numChannels*len, tw->f);
-	}
-	case TW_FLOAT32: {
-		size_t samples_read = 0;
-		float *interleaved_data = (float *)alloca(tw->numChannels*len * sizeof(float));
-		samples_read = fread(interleaved_data, sizeof(float), tw->numChannels*len, tw->f);
-		switch (tw->chanFmt) {
-		case TW_INTERLEAVED: { // channel buffer is interleaved e.g. [LRLRLRLR]
-			memcpy(data, interleaved_data, tw->numChannels*len * sizeof(float));
-			return (int)(samples_read / tw->numChannels);
-		}
-		case TW_INLINE: { // channel buffer is inlined e.g. [LLLLRRRR]
-			for (int i = 0, pos = 0; i < tw->numChannels; i++) {
-				for (int j = i; j < len * tw->numChannels; j += tw->numChannels, ++pos) {
-					((float *)data)[pos] = interleaved_data[j];
-				}
-			}
-			return (int)(samples_read / tw->numChannels);
-		}
-		case TW_SPLIT: { // channel buffer is split e.g. [[LLLL],[RRRR]]
-			for (int i = 0, pos = 0; i < tw->numChannels; i++) {
-				for (int j = 0; j < len; j++, ++pos) {
-					((float **)data)[i][j] = interleaved_data[j*tw->numChannels + i];
-				}
-			}
-			return (int)(samples_read / tw->numChannels);
-		}
-		default: return 0;
-		}
-	}
-	default: return 0;
-	}
-
-	return len;
-}
-
-void tinywav_close_read(TinyWav *tw) {
-	fclose(tw->f);
-	tw->f = NULL;
-}
-
-size_t tinywav_write_f(TinyWav *tw, void *f, int len) {
-	switch (tw->sampFmt) {
-	case TW_INT16: {
-		int16_t *z = (int16_t *)alloca(tw->numChannels*len * sizeof(int16_t));
-		switch (tw->chanFmt) {
-		case TW_INTERLEAVED: {
-			const float *const x = (const float *const)f;
-			for (int i = 0; i < tw->numChannels*len; ++i) {
-				z[i] = (int16_t)(x[i] * 32767.0f);
-			}
-			break;
-		}
-		case TW_INLINE: {
-			const float *const x = (const float *const)f;
-			for (int i = 0, k = 0; i < len; ++i) {
-				for (int j = 0; j < tw->numChannels; ++j) {
-					z[k++] = (int16_t)(x[j*len + i] * 32767.0f);
-				}
-			}
-			break;
-		}
-		case TW_SPLIT: {
-			const float **const x = (const float **const)f;
-			for (int i = 0, k = 0; i < len; ++i) {
-				for (int j = 0; j < tw->numChannels; ++j) {
-					z[k++] = (int16_t)(x[j][i] * 32767.0f);
-				}
-			}
-			break;
-		}
-		default: return 0;
-		}
-
-		tw->totalFramesWritten += len;
-		return fwrite(z, sizeof(int16_t), tw->numChannels*len, tw->f);
-		break;
-	}
-	case TW_FLOAT32: {
-		float *z = (float *)alloca(tw->numChannels*len * sizeof(float));
-		switch (tw->chanFmt) {
-		case TW_INTERLEAVED: {
-			tw->totalFramesWritten += len;
-			return fwrite(f, sizeof(float), tw->numChannels*len, tw->f);
-		}
-		case TW_INLINE: {
-			const float *const x = (const float *const)f;
-			for (int i = 0, k = 0; i < len; ++i) {
-				for (int j = 0; j < tw->numChannels; ++j) {
-					z[k++] = x[j*len + i];
-				}
-			}
-			break;
-		}
-		case TW_SPLIT: {
-			const float **const x = (const float **const)f;
-			for (int i = 0, k = 0; i < len; ++i) {
-				for (int j = 0; j < tw->numChannels; ++j) {
-					z[k++] = x[j][i];
-				}
-			}
-			break;
-		}
-		default: return 0;
-		}
-
-		tw->totalFramesWritten += len;
-		return fwrite(z, sizeof(float), tw->numChannels*len, tw->f);
-	}
-	default: return 0;
-	}
-}
-
-void tinywav_close_write(TinyWav *tw) {
-	uint32_t data_len = tw->totalFramesWritten * tw->numChannels * tw->sampFmt;
-
-	// set length of data
-	fseek(tw->f, 4, SEEK_SET);
-	uint32_t chunkSize_len = 36 + data_len;
-	fwrite(&chunkSize_len, sizeof(uint32_t), 1, tw->f);
-
-	fseek(tw->f, 40, SEEK_SET);
-	fwrite(&data_len, sizeof(uint32_t), 1, tw->f);
-
-	fclose(tw->f);
-	tw->f = NULL;
-}
-
-bool tinywav_isOpen(TinyWav *tw) {
-	return (tw->f != NULL);
-}
\ No newline at end of file
diff --git a/tinywav.h b/tinywav.h
deleted file mode 100644
index 16bf657..0000000
--- a/tinywav.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Copyright (c) 2015-2017, Martin Roth (mhroth@gmail.com)
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-
-
-#ifndef _TINY_WAV_
-#define _TINY_WAV_
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-	// http://soundfile.sapp.org/doc/WaveFormat/
-	typedef struct TinyWavHeader {
-		uint32_t ChunkID;
-		uint32_t ChunkSize;
-		uint32_t Format;
-		uint32_t Subchunk1ID;
-		uint32_t Subchunk1Size;
-		uint16_t AudioFormat;
-		uint16_t NumChannels;
-		uint32_t SampleRate;
-		uint32_t ByteRate;
-		uint16_t BlockAlign;
-		uint16_t BitsPerSample;
-		uint32_t Subchunk2ID;
-		uint32_t Subchunk2Size;
-	} TinyWavHeader;
-
-	typedef enum TinyWavChannelFormat {
-		TW_INTERLEAVED, // channel buffer is interleaved e.g. [LRLRLRLR]
-		TW_INLINE,      // channel buffer is inlined e.g. [LLLLRRRR]
-		TW_SPLIT        // channel buffer is split e.g. [[LLLL],[RRRR]]
-	} TinyWavChannelFormat;
-
-	typedef enum TinyWavSampleFormat {
-		TW_INT16 = 2,  // two byte signed integer
-		TW_FLOAT32 = 4 // four byte IEEE float
-	} TinyWavSampleFormat;
-
-	typedef struct TinyWav {
-		FILE *f;
-		TinyWavHeader h;
-		int16_t numChannels;
-		uint32_t totalFramesWritten;
-		TinyWavChannelFormat chanFmt;
-		TinyWavSampleFormat sampFmt;
-	} TinyWav;
-
-	/**
-	 * Open a file for writing.
-	 *
-	 * @param numChannels  The number of channels to write.
-	 * @param samplerate   The sample rate of the audio.
-	 * @param sampFmt      The sample format (e.g. 16-bit integer or 32-bit float).
-	 * @param chanFmt      The channel format (how the channel data is layed out in memory)
-	 * @param path         The path of the file to write to. The file will be overwritten.
-	 *
-	 * @return  The error code. Zero if no error.
-	 */
-	int tinywav_open_write(TinyWav *tw,
-		int16_t numChannels, int32_t samplerate,
-		TinyWavSampleFormat sampFmt, TinyWavChannelFormat chanFmt,
-		const char *path);
-
-	/**
-	 * Open a file for reading.
-	 *
-	 * @param sampFmt  The sample format (e.g. 16-bit integer or 32-bit float)
-	 *                 that the file should be converted to.
-	 * @param chanFmt  The channel format (how the channel data is layed out in memory) when read.
-	 * @param path     The path of the file to read.
-	 *
-	 * @return  The error code. Zero if no error.
-	 */
-	int tinywav_open_read(TinyWav *tw, const char *path,
-		TinyWavChannelFormat chanFmt, TinyWavSampleFormat sampFmt);
-
-	/**
-	 * Read sample data from the file.
-	 *
-	 * @param data  A pointer to the data structure to read to. This data is expected to have the
-	 *              correct memory layout to match the specifications given in tinywav_open_read().
-	 * @param len   The number of frames to read.
-	 */
-	int tinywav_read_f(TinyWav *tw, void *data, int len);
-
-	/** Stop reading the file. The Tinywav struct is now invalid. */
-	void tinywav_close_read(TinyWav *tw);
-
-	/**
-	 * Write sample data to file.
-	 *
-	 * @param tw   The TinyWav structure which has already been prepared.
-	 * @param f    A pointer to the sample data to write.
-	 * @param len  The number of frames to write.
-	 *
-	 * @return The total number of samples written to file.
-	 */
-	size_t tinywav_write_f(TinyWav *tw, void *f, int len);
-
-	/** Stop writing to the file. The Tinywav struct is now invalid. */
-	void tinywav_close_write(TinyWav *tw);
-
-	/** Returns true if the Tinywav struct is available to write or write. False otherwise. */
-	bool tinywav_isOpen(TinyWav *tw);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // _TINY_WAV_
\ No newline at end of file
-- 
GitLab