diff --git a/node_modules/.bin/csv-parser b/node_modules/.bin/csv-parser
new file mode 100644
index 0000000000000000000000000000000000000000..bb9d835ddcb2232f398f12fdef00102249a49f42
--- /dev/null
+++ b/node_modules/.bin/csv-parser
@@ -0,0 +1,12 @@
+#!/bin/sh
+basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
+
+case `uname` in
+    *CYGWIN*|*MINGW*|*MSYS*) basedir=`cygpath -w "$basedir"`;;
+esac
+
+if [ -x "$basedir/node" ]; then
+  exec "$basedir/node"  "$basedir/../csv-parser/bin/csv-parser" "$@"
+else 
+  exec node  "$basedir/../csv-parser/bin/csv-parser" "$@"
+fi
diff --git a/node_modules/.bin/csv-parser.cmd b/node_modules/.bin/csv-parser.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..f63e2b848ccecd2ab0743dbdbf1baa22356ced27
--- /dev/null
+++ b/node_modules/.bin/csv-parser.cmd
@@ -0,0 +1,17 @@
+@ECHO off
+GOTO start
+:find_dp0
+SET dp0=%~dp0
+EXIT /b
+:start
+SETLOCAL
+CALL :find_dp0
+
+IF EXIST "%dp0%\node.exe" (
+  SET "_prog=%dp0%\node.exe"
+) ELSE (
+  SET "_prog=node"
+  SET PATHEXT=%PATHEXT:;.JS;=;%
+)
+
+endLocal & goto #_undefined_# 2>NUL || title %COMSPEC% & "%_prog%"  "%dp0%\..\csv-parser\bin\csv-parser" %*
diff --git a/node_modules/.bin/csv-parser.ps1 b/node_modules/.bin/csv-parser.ps1
new file mode 100644
index 0000000000000000000000000000000000000000..34496c29ea5f0daf31b6d102e03ff2f9d9d3fc61
--- /dev/null
+++ b/node_modules/.bin/csv-parser.ps1
@@ -0,0 +1,28 @@
+#!/usr/bin/env pwsh
+$basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent
+
+$exe=""
+if ($PSVersionTable.PSVersion -lt "6.0" -or $IsWindows) {
+  # Fix case when both the Windows and Linux builds of Node
+  # are installed in the same directory
+  $exe=".exe"
+}
+$ret=0
+if (Test-Path "$basedir/node$exe") {
+  # Support pipeline input
+  if ($MyInvocation.ExpectingInput) {
+    $input | & "$basedir/node$exe"  "$basedir/../csv-parser/bin/csv-parser" $args
+  } else {
+    & "$basedir/node$exe"  "$basedir/../csv-parser/bin/csv-parser" $args
+  }
+  $ret=$LASTEXITCODE
+} else {
+  # Support pipeline input
+  if ($MyInvocation.ExpectingInput) {
+    $input | & "node$exe"  "$basedir/../csv-parser/bin/csv-parser" $args
+  } else {
+    & "node$exe"  "$basedir/../csv-parser/bin/csv-parser" $args
+  }
+  $ret=$LASTEXITCODE
+}
+exit $ret
diff --git a/node_modules/.package-lock.json b/node_modules/.package-lock.json
index 56fc016916c750149d391d091296a3ed273d30de..83d5752b032c16a6d7c212df23db36cbbe5213a0 100644
--- a/node_modules/.package-lock.json
+++ b/node_modules/.package-lock.json
@@ -1991,6 +1991,20 @@
         "node": ">=12.10"
       }
     },
+    "node_modules/csv-parser": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/csv-parser/-/csv-parser-3.0.0.tgz",
+      "integrity": "sha512-s6OYSXAK3IdKqYO33y09jhypG/bSDHPuyCme/IdEHfWpLf/jKcpitVFyOC6UemgGk8v7Q5u2XE0vvwmanxhGlQ==",
+      "dependencies": {
+        "minimist": "^1.2.0"
+      },
+      "bin": {
+        "csv-parser": "bin/csv-parser"
+      },
+      "engines": {
+        "node": ">= 10"
+      }
+    },
     "node_modules/datatables.net": {
       "version": "1.13.4",
       "resolved": "https://registry.npmjs.org/datatables.net/-/datatables.net-1.13.4.tgz",
@@ -4397,7 +4411,6 @@
       "version": "1.2.7",
       "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz",
       "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==",
-      "dev": true,
       "funding": {
         "url": "https://github.com/sponsors/ljharb"
       }
diff --git a/node_modules/csv-parser/LICENSE b/node_modules/csv-parser/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..757562ec59276bff35792501d88fe83b34acca9a
--- /dev/null
+++ b/node_modules/csv-parser/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mathias Buus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/node_modules/csv-parser/README.md b/node_modules/csv-parser/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bfd4b31491538e5fab88f64dae1e4c66fad4b867
--- /dev/null
+++ b/node_modules/csv-parser/README.md
@@ -0,0 +1,358 @@
+[tests]: 	http://img.shields.io/travis/mafintosh/csv-parser.svg
+[tests-url]: http://travis-ci.org/mafintosh/csv-parser
+
+[cover]: https://codecov.io/gh/mafintosh/csv-parser/branch/master/graph/badge.svg
+[cover-url]: https://codecov.io/gh/mafintosh/csv-parser
+
+[size]: https://packagephobia.now.sh/badge?p=csv-parser
+[size-url]: https://packagephobia.now.sh/result?p=csv-parser
+
+# csv-parser
+
+[![tests][tests]][tests-url]
+[![cover][cover]][cover-url]
+[![size][size]][size-url]
+
+Streaming CSV parser that aims for maximum speed as well as compatibility with
+the [csv-spectrum](https://npmjs.org/csv-spectrum) CSV acid test suite.
+
+`csv-parser` can convert CSV into JSON at a rate of around 90,000 rows per
+second. Performance varies with the data used; try `bin/bench.js <your file>`
+to benchmark your data.
+
+`csv-parser` can be used in the browser with [browserify](http://browserify.org/).
+
+[neat-csv](https://github.com/sindresorhus/neat-csv) can be used if a `Promise`
+based interface to `csv-parser` is needed.
+
+_Note: This module requires Node v8.16.0 or higher._
+
+## Benchmarks
+
+⚡️ `csv-parser` is greased-lightning fast
+
+```console
+→ npm run bench
+
+  Filename                 Rows Parsed  Duration
+  backtick.csv                       2     3.5ms
+  bad-data.csv                       3    0.55ms
+  basic.csv                          1    0.26ms
+  comma-in-quote.csv                 1    0.29ms
+  comment.csv                        2    0.40ms
+  empty-columns.csv                  1    0.40ms
+  escape-quotes.csv                  3    0.38ms
+  geojson.csv                        3    0.46ms
+  large-dataset.csv               7268      73ms
+  newlines.csv                       3    0.35ms
+  no-headers.csv                     3    0.26ms
+  option-comment.csv                 2    0.24ms
+  option-escape.csv                  3    0.25ms
+  option-maxRowBytes.csv          4577      39ms
+  option-newline.csv                 0    0.47ms
+  option-quote-escape.csv            3    0.33ms
+  option-quote-many.csv              3    0.38ms
+  option-quote.csv                   2    0.22ms
+  quotes+newlines.csv                3    0.20ms
+  strict.csv                         3    0.22ms
+  latin.csv                          2    0.38ms
+  mac-newlines.csv                   2    0.28ms
+  utf16-big.csv                      2    0.33ms
+  utf16.csv                          2    0.26ms
+  utf8.csv                           2    0.24ms
+```
+
+## Install
+
+Using npm:
+
+```console
+$ npm install csv-parser
+```
+
+Using yarn:
+
+```console
+$ yarn add csv-parser
+```
+
+## Usage
+
+To use the module, create a readable stream to a desired CSV file, instantiate
+`csv`, and pipe the stream to `csv`.
+
+Suppose you have a CSV file `data.csv` which contains the data:
+
+```
+NAME,AGE
+Daffy Duck,24
+Bugs Bunny,22
+```
+
+It could then be parsed, and results shown like so:
+
+``` js
+const csv = require('csv-parser')
+const fs = require('fs')
+const results = [];
+
+fs.createReadStream('data.csv')
+  .pipe(csv())
+  .on('data', (data) => results.push(data))
+  .on('end', () => {
+    console.log(results);
+    // [
+    //   { NAME: 'Daffy Duck', AGE: '24' },
+    //   { NAME: 'Bugs Bunny', AGE: '22' }
+    // ]
+  });
+```
+
+To specify options for `csv`, pass an object argument to the function. For
+example:
+
+```js
+csv({ separator: '\t' });
+```
+
+## API
+
+### csv([options | headers])
+
+Returns: `Array[Object]`
+
+#### options
+
+Type: `Object`
+
+As an alternative to passing an `options` object, you may pass an `Array[String]`
+which specifies the headers to use. For example:
+
+```js
+csv(['Name', 'Age']);
+```
+
+If you need to specify options _and_ headers, please use the object notation
+with the `headers` property as shown below.
+
+#### escape
+
+Type: `String`<br>
+Default: `"`
+
+A single-character string used to specify the character used to escape strings
+in a CSV row.
+
+#### headers
+
+Type: `Array[String] | Boolean`
+
+Specifies the headers to use. Headers define the property key for each value in
+a CSV row. If no `headers` option is provided, `csv-parser` will use the first
+line in a CSV file as the header specification.
+
+If `false`, specifies that the first row in a data file does _not_ contain
+headers, and instructs the parser to use the column index as the key for each column.
+Using `headers: false` with the same `data.csv` example from above would yield:
+
+``` js
+[
+  { '0': 'Daffy Duck', '1': 24 },
+  { '0': 'Bugs Bunny', '1': 22 }
+]
+```
+
+_Note: If using the `headers` for an operation on a file which contains headers on the first line, specify `skipLines: 1` to skip over the row, or the headers row will appear as normal row data. Alternatively, use the `mapHeaders` option to manipulate existing headers in that scenario._
+
+#### mapHeaders
+
+Type: `Function`
+
+A function that can be used to modify the values of each header. Return a `String` to modify the header. Return `null` to remove the header, and its column, from the results.
+
+```js
+csv({
+  mapHeaders: ({ header, index }) => header.toLowerCase()
+})
+```
+
+##### Parameters
+
+**header** _String_ The current column header.<br/>
+**index** _Number_ The current column index.
+
+#### mapValues
+
+Type: `Function`
+
+A function that can be used to modify the content of each column. The return value will replace the current column content.
+
+```js
+csv({
+  mapValues: ({ header, index, value }) => value.toLowerCase()
+})
+```
+
+##### Parameters
+
+**header** _String_ The current column header.<br/>
+**index** _Number_ The current column index.<br/>
+**value** _String_ The current column value (or content).
+
+##### newline
+
+Type: `String`<br>
+Default: `\n`
+
+Specifies a single-character string to denote the end of a line in a CSV file.
+
+#### quote
+
+Type: `String`<br>
+Default: `"`
+
+Specifies a single-character string to denote a quoted string.
+
+#### raw
+
+Type: `Boolean`<br>
+
+If `true`, instructs the parser not to decode UTF-8 strings.
+
+#### separator
+
+Type: `String`<br>
+Default: `,`
+
+Specifies a single-character string to use as the column separator for each row.
+
+#### skipComments
+
+Type: `Boolean | String`<br>
+Default: `false`
+
+Instructs the parser to ignore lines which represent comments in a CSV file. Since there is no specification that dictates what a CSV comment looks like, comments should be considered non-standard. The "most common" character used to signify a comment in a CSV file is `"#"`. If this option is set to `true`, lines which begin with `#` will be skipped. If a custom character is needed to denote a commented line, this option may be set to a string which represents the leading character(s) signifying a comment line.
+
+#### skipLines
+
+Type: `Number`<br>
+Default: `0`
+
+Specifies the number of lines at the beginning of a data file that the parser should
+skip over, prior to parsing headers.
+
+#### maxRowBytes
+
+Type: `Number`<br>
+Default: `Number.MAX_SAFE_INTEGER`
+
+Maximum number of bytes per row. An error is thrown if a line exceeds this value. The default value is approximately 8 petabytes.
+
+#### strict
+
+Type: `Boolean`<br>
+Default: `false`
+
+If `true`, instructs the parser that the number of columns in each row must match
+the number of `headers` specified or throws an exception.
+if `false`: the headers are mapped to the column index
+   less columns: any missing column in the middle will result in a wrong property mapping!
+   more columns: the additional columns will create a "_"+index properties - e.g. "_10":"value"
+
+## Events
+
+The following events are emitted during parsing:
+
+### `data`
+
+Emitted for each row of data parsed with the notable exception of the header
+row. Please see [Usage](#Usage) for an example.
+
+### `headers`
+
+Emitted after the header row is parsed. The first parameter of the event
+callback is an `Array[String]` containing the header names.
+
+```js
+fs.createReadStream('data.csv')
+  .pipe(csv())
+  .on('headers', (headers) => {
+    console.log(`First header: ${headers[0]}`)
+  })
+```
+
+### Readable Stream Events
+
+Events available on Node built-in
+[Readable Streams](https://nodejs.org/api/stream.html#stream_class_stream_readable)
+are also emitted. The `end` event should be used to detect the end of parsing.
+
+## CLI
+
+This module also provides a CLI which will convert CSV to
+[newline-delimited](http://ndjson.org/) JSON. The following CLI flags can be
+used to control how input is parsed:
+
+```
+Usage: csv-parser [filename?] [options]
+
+  --escape,-e         Set the escape character (defaults to quote value)
+  --headers,-h        Explicitly specify csv headers as a comma separated list
+  --help              Show this help
+  --output,-o         Set output file. Defaults to stdout
+  --quote,-q          Set the quote character ('"' by default)
+  --remove            Remove columns from output by header name
+  --separator,-s      Set the separator character ("," by default)
+  --skipComments,-c   Skip CSV comments that begin with '#'. Set a value to change the comment character.
+  --skipLines,-l      Set the number of lines to skip to before parsing headers
+  --strict            Require column length match headers length
+  --version,-v        Print out the installed version
+```
+
+For example; to parse a TSV file:
+
+```
+cat data.tsv | csv-parser -s $'\t'
+```
+
+## Encoding
+
+Users may encounter issues with the encoding of a CSV file. Transcoding the
+source stream can be done neatly with a modules such as:
+- [`iconv-lite`](https://www.npmjs.com/package/iconv-lite)
+- [`iconv`](https://www.npmjs.com/package/iconv)
+
+Or native [`iconv`](http://man7.org/linux/man-pages/man1/iconv.1.html) if part
+of a pipeline.
+
+## Byte Order Marks
+
+Some CSV files may be generated with, or contain a leading [Byte Order Mark](https://en.wikipedia.org/wiki/Byte_order_mark#UTF-8). This may cause issues parsing headers and/or data from your file. From Wikipedia:
+
+>The Unicode Standard permits the BOM in UTF-8, but does not require nor recommend its use. Byte order has no meaning in UTF-8.
+
+To use this module with a file containing a BOM, please use a module like [strip-bom-stream](https://github.com/sindresorhus/strip-bom-stream) in your pipeline:
+
+```js
+const fs = require('fs');
+
+const csv = require('csv-parser');
+const stripBom = require('strip-bom-stream');
+
+fs.createReadStream('data.csv')
+  .pipe(stripBom())
+  .pipe(csv())
+  ...
+```
+
+When using the CLI, the BOM can be removed by first running:
+
+```console
+$ sed $'s/\xEF\xBB\xBF//g' data.csv
+```
+
+## Meta
+
+[CONTRIBUTING](./.github/CONTRIBUTING)
+
+[LICENSE (MIT)](./LICENSE)
diff --git a/node_modules/csv-parser/bin/csv-parser b/node_modules/csv-parser/bin/csv-parser
new file mode 100644
index 0000000000000000000000000000000000000000..f895a8ebd8d9d556ee2bcf06263c0cdbbfc6ab33
--- /dev/null
+++ b/node_modules/csv-parser/bin/csv-parser
@@ -0,0 +1,94 @@
+#!/usr/bin/env node
+
+const { EOL } = require('os')
+const minimist = require('minimist')
+const { Transform } = require('stream');
+const fs = require('fs')
+const csv = require('../')
+const pkg = require('../package.json')
+
+const argv = minimist(process.argv, {
+  alias: {
+    c: 'skipComments',
+    e: 'escape',
+    h: 'headers',
+    o: 'output',
+    q: 'quote',
+    l: 'skipLines',
+    s: 'separator',
+    v: 'version'
+  },
+  default: {
+    e: '"',
+    q: '"',
+    s: ','
+  },
+  boolean: ['version', 'help']
+})
+
+const [,, filename] = argv._
+
+if (argv.version) {
+  console.log(pkg.version)
+  process.exit(0)
+}
+
+if (argv.help || (process.stdin.isTTY && !filename)) {
+  console.error(`Usage: csv-parser [filename?] [options]
+  --escape,-e         Set the escape character (defaults to quote value)
+  --headers,-h        Explicitly specify csv headers as a comma separated list
+  --help              Show this help
+  --output,-o         Set output file. Defaults to stdout
+  --quote,-q          Set the quote character ('"' by default)
+  --remove            Remove headers from output
+  --separator,-s      Set the separator character ("," by default)
+  --skipComments,-c   Skip CSV comments that begin with '#'. Set a value to change the comment character.
+  --skipLines,-l      Set the number of lines to skip to before parsing headers
+  --strict            Require column length match headers length
+  --version,-v        Print out the installed version
+`)
+  process.exit(1)
+}
+
+let input
+const output = (argv.output && argv.output !== '-') ? fs.createWriteStream(argv.output) : process.stdout
+const options = {
+  separator: argv.separator,
+  strict: argv.strict,
+  skipComments: argv.skipComments,
+  skipLines: argv.skipLines
+}
+
+if (argv.headers) {
+  options.headers = argv.headers.toString().split(argv.separator)
+}
+
+if (argv.remove) {
+  const removeHeaders = argv.remove.split(',')
+  options.mapHeaders = (name, i) => {
+    return removeHeaders.indexOf(name) === -1 ? name : null
+  }
+}
+
+if (filename === '-' || !filename) {
+  input = process.stdin
+} else if (fs.existsSync(filename)) {
+  input = fs.createReadStream(filename)
+} else {
+  console.error(`File: ${filename} does not exist`)
+  process.exit(2)
+}
+
+const serialize = () => {
+  return new Transform({
+    objectMode: true,
+    transform(obj, enc, cb) {
+      cb(null, JSON.stringify(obj) + EOL)
+    }
+  });
+}
+
+input
+  .pipe(csv(options))
+  .pipe(serialize())
+  .pipe(output)
diff --git a/node_modules/csv-parser/index.d.ts b/node_modules/csv-parser/index.d.ts
new file mode 100644
index 0000000000000000000000000000000000000000..a6a400df34f5b98ae809f7ba7b7f675c3a273f0b
--- /dev/null
+++ b/node_modules/csv-parser/index.d.ts
@@ -0,0 +1,146 @@
+/// <reference types="node"/>
+import { Transform } from 'stream';
+
+declare namespace csvParser {
+  type CsvParser = Transform;
+
+  interface Options {
+    /**
+     * A single-character string used to specify the character used to escape strings in a CSV row.
+     *
+     * @default '"'
+     */
+    readonly escape?: string;
+
+    /**
+     * Specifies the headers to use. Headers define the property key for each value in a CSV row. If no `headers` option is provided, `csv-parser` will use the first line in a CSV file as the header specification.
+     *
+     * If `false`, specifies that the first row in a data file does _not_ contain headers, and instructs the parser to use the row index as the key for each row.
+     *
+     * Suppose you have a CSV file `data.csv` which contains the data:
+     *
+     * ```
+NAME,AGE
+Daffy Duck,24
+Bugs Bunny,22
+```
+     * Using `headers: false` with the data from `data.csv` would yield:
+     * ```
+[
+  { '0': 'Daffy Duck', '1': 24 },
+  { '0': 'Bugs Bunny', '1': 22 }
+]
+```
+     */
+    readonly headers?: ReadonlyArray<string> | boolean;
+
+    /**
+     * A function that can be used to modify the values of each header. Return `null` to remove the header, and its column, from the results.
+     *
+     * @example
+     *
+     * csv({
+     *   mapHeaders: ({ header, index }) => header.toLowerCase()
+     * });
+     */
+    readonly mapHeaders?: (args: { header: string; index: number }) => string | null;
+
+    /**
+     * A function that can be used to modify the value of each column value.
+     *
+     * @example
+     *
+     * csv({
+     *   mapValues: ({ header, index, value }) => value.toLowerCase()
+     * });
+     */
+    readonly mapValues?: (args: { header: string; index: number; value: any }) => any;
+
+    /**
+     * Specifies a single-character string to denote the end of a line in a CSV file.
+     *
+     * @default '\n'
+     */
+    readonly newline?: string;
+
+    /**
+     * Specifies a single-character string to denote a quoted string.
+     *
+     * @default '"'
+     */
+    readonly quote?: string;
+
+    /**
+     * If `true`, instructs the parser not to decode UTF-8 strings.
+     */
+    readonly raw?: boolean;
+
+    /**
+     * Specifies a single-character string to use as the column separator for each row.
+     *
+     * @default ','
+     */
+    readonly separator?: string;
+
+    /**
+     * Instructs the parser to ignore lines which represent comments in a CSV file. Since there is no specification that dictates what a CSV comment looks like, comments should be considered non-standard. The "most common" character used to signify a comment in a CSV file is `"#"`. If this option is set to `true`, lines which begin with `#` will be skipped. If a custom character is needed to denote a commented line, this option may be set to a string which represents the leading character(s) signifying a comment line.
+     *
+     * @default false
+     */
+    readonly skipComments?: boolean | string;
+
+    /**
+     * Specifies the number of lines at the beginning of a data file that the parser should skip over, prior to parsing headers.
+     *
+     * @default 0
+     */
+    readonly skipLines?: number;
+
+    /**
+     * Maximum number of bytes per row. An error is thrown if a line exceeds this value. The default value is approximately 8 petabytes.
+     *
+     * @default Number.MAX_SAFE_INTEGER
+     */
+    readonly maxRowBytes?: number;
+
+    /**
+     * If `true`, instructs the parser that the number of columns in each row must match the number of `headers` specified.
+     */
+    readonly strict?: boolean;
+  }
+}
+
+/**
+ * Streaming CSV parser that aims for maximum speed as well as compatibility with the [csv-spectrum](https://npmjs.org/csv-spectrum) CSV acid test suite.
+ *
+ * @param optionsOrHeaders - As an alternative to passing an `options` object, you may pass an `Array[String]` which specifies the headers to use. If you need to specify options _and_ headers, please use the object notation with the `headers` property.
+ *
+ * @example
+ *
+ * // data.csv:
+ * //
+ * // NAME,AGE
+ * // Daffy Duck,24
+ * // Bugs Bunny,22
+ *
+ * import csv = require('csv-parser');
+ * import * as fs from 'fs';
+ *
+ * const results = [];
+ *
+ * fs.createReadStream('data.csv')
+ *   .pipe(csv())
+ *   .on('data', (data) => results.push(data))
+ *   .on('end', () => {
+ *     console.log(results);
+ *     // [
+ *     //   { NAME: 'Daffy Duck', AGE: '24' },
+ *     //   { NAME: 'Bugs Bunny', AGE: '22' }
+ *     // ]
+ *   });
+ */
+declare const csvParser: (
+  optionsOrHeaders?: csvParser.Options | ReadonlyArray<string>
+) => csvParser.CsvParser;
+
+export = csvParser;
diff --git a/node_modules/csv-parser/index.js b/node_modules/csv-parser/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..2a6a2daa99a02936f8b4c17be532a0c7f87a1515
--- /dev/null
+++ b/node_modules/csv-parser/index.js
@@ -0,0 +1,275 @@
+const { Transform } = require('stream')
+
+const [cr] = Buffer.from('\r')
+const [nl] = Buffer.from('\n')
+const defaults = {
+  escape: '"',
+  headers: null,
+  mapHeaders: ({ header }) => header,
+  mapValues: ({ value }) => value,
+  newline: '\n',
+  quote: '"',
+  raw: false,
+  separator: ',',
+  skipComments: false,
+  skipLines: null,
+  maxRowBytes: Number.MAX_SAFE_INTEGER,
+  strict: false
+}
+
+class CsvParser extends Transform {
+  constructor (opts = {}) {
+    super({ objectMode: true, highWaterMark: 16 })
+
+    if (Array.isArray(opts)) opts = { headers: opts }
+
+    const options = Object.assign({}, defaults, opts)
+
+    options.customNewline = options.newline !== defaults.newline
+
+    for (const key of ['newline', 'quote', 'separator']) {
+      if (typeof options[key] !== 'undefined') {
+        ([options[key]] = Buffer.from(options[key]))
+      }
+    }
+
+    // if escape is not defined on the passed options, use the end value of quote
+    options.escape = (opts || {}).escape ? Buffer.from(options.escape)[0] : options.quote
+
+    this.state = {
+      empty: options.raw ? Buffer.alloc(0) : '',
+      escaped: false,
+      first: true,
+      lineNumber: 0,
+      previousEnd: 0,
+      rowLength: 0,
+      quoted: false
+    }
+
+    this._prev = null
+
+    if (options.headers === false) {
+      // enforce, as the column length check will fail if headers:false
+      options.strict = false
+    }
+
+    if (options.headers || options.headers === false) {
+      this.state.first = false
+    }
+
+    this.options = options
+    this.headers = options.headers
+  }
+
+  parseCell (buffer, start, end) {
+    const { escape, quote } = this.options
+    // remove quotes from quoted cells
+    if (buffer[start] === quote && buffer[end - 1] === quote) {
+      start++
+      end--
+    }
+
+    let y = start
+
+    for (let i = start; i < end; i++) {
+      // check for escape characters and skip them
+      if (buffer[i] === escape && i + 1 < end && buffer[i + 1] === quote) {
+        i++
+      }
+
+      if (y !== i) {
+        buffer[y] = buffer[i]
+      }
+      y++
+    }
+
+    return this.parseValue(buffer, start, y)
+  }
+
+  parseLine (buffer, start, end) {
+    const { customNewline, escape, mapHeaders, mapValues, quote, separator, skipComments, skipLines } = this.options
+
+    end-- // trim newline
+    if (!customNewline && buffer.length && buffer[end - 1] === cr) {
+      end--
+    }
+
+    const comma = separator
+    const cells = []
+    let isQuoted = false
+    let offset = start
+
+    if (skipComments) {
+      const char = typeof skipComments === 'string' ? skipComments : '#'
+      if (buffer[start] === Buffer.from(char)[0]) {
+        return
+      }
+    }
+
+    const mapValue = (value) => {
+      if (this.state.first) {
+        return value
+      }
+
+      const index = cells.length
+      const header = this.headers[index]
+
+      return mapValues({ header, index, value })
+    }
+
+    for (let i = start; i < end; i++) {
+      const isStartingQuote = !isQuoted && buffer[i] === quote
+      const isEndingQuote = isQuoted && buffer[i] === quote && i + 1 <= end && buffer[i + 1] === comma
+      const isEscape = isQuoted && buffer[i] === escape && i + 1 < end && buffer[i + 1] === quote
+
+      if (isStartingQuote || isEndingQuote) {
+        isQuoted = !isQuoted
+        continue
+      } else if (isEscape) {
+        i++
+        continue
+      }
+
+      if (buffer[i] === comma && !isQuoted) {
+        let value = this.parseCell(buffer, offset, i)
+        value = mapValue(value)
+        cells.push(value)
+        offset = i + 1
+      }
+    }
+
+    if (offset < end) {
+      let value = this.parseCell(buffer, offset, end)
+      value = mapValue(value)
+      cells.push(value)
+    }
+
+    if (buffer[end - 1] === comma) {
+      cells.push(mapValue(this.state.empty))
+    }
+
+    const skip = skipLines && skipLines > this.state.lineNumber
+    this.state.lineNumber++
+
+    if (this.state.first && !skip) {
+      this.state.first = false
+      this.headers = cells.map((header, index) => mapHeaders({ header, index }))
+
+      this.emit('headers', this.headers)
+      return
+    }
+
+    if (!skip && this.options.strict && cells.length !== this.headers.length) {
+      const e = new RangeError('Row length does not match headers')
+      this.emit('error', e)
+    } else {
+      if (!skip) this.writeRow(cells)
+    }
+  }
+
+  parseValue (buffer, start, end) {
+    if (this.options.raw) {
+      return buffer.slice(start, end)
+    }
+
+    return buffer.toString('utf-8', start, end)
+  }
+
+  writeRow (cells) {
+    const headers = (this.headers === false) ? cells.map((value, index) => index) : this.headers
+
+    const row = cells.reduce((o, cell, index) => {
+      const header = headers[index]
+      if (header === null) return o // skip columns
+      if (header !== undefined) {
+        o[header] = cell
+      } else {
+        o[`_${index}`] = cell
+      }
+      return o
+    }, {})
+
+    this.push(row)
+  }
+
+  _flush (cb) {
+    if (this.state.escaped || !this._prev) return cb()
+    this.parseLine(this._prev, this.state.previousEnd, this._prev.length + 1) // +1 because parseLine immediately trims one trailing newline byte
+    cb()
+  }
+
+  _transform (data, enc, cb) {
+    if (typeof data === 'string') {
+      data = Buffer.from(data)
+    }
+
+    const { escape, quote } = this.options
+    let start = 0
+    let buffer = data
+
+    if (this._prev) {
+      start = this._prev.length
+      buffer = Buffer.concat([this._prev, data])
+      this._prev = null
+    }
+
+    const bufferLength = buffer.length
+
+    for (let i = start; i < bufferLength; i++) {
+      const chr = buffer[i]
+      const nextChr = i + 1 < bufferLength ? buffer[i + 1] : null
+
+      this.state.rowLength++
+      if (this.state.rowLength > this.options.maxRowBytes) {
+        return cb(new Error('Row exceeds the maximum size'))
+      }
+
+      if (!this.state.escaped && chr === escape && nextChr === quote && i !== start) {
+        this.state.escaped = true
+        continue
+      } else if (chr === quote) {
+        if (this.state.escaped) {
+          this.state.escaped = false
+          // non-escaped quote (quoting the cell)
+        } else {
+          this.state.quoted = !this.state.quoted
+        }
+        continue
+      }
+
+      if (!this.state.quoted) {
+        if (this.state.first && !this.options.customNewline) {
+          if (chr === nl) {
+            this.options.newline = nl
+          } else if (chr === cr) {
+            if (nextChr !== nl) {
+              this.options.newline = cr
+            }
+          }
+        }
+
+        if (chr === this.options.newline) {
+          this.parseLine(buffer, this.state.previousEnd, i + 1)
+          this.state.previousEnd = i + 1
+          this.state.rowLength = 0
+        }
+      }
+    }
+
+    if (this.state.previousEnd === bufferLength) {
+      this.state.previousEnd = 0
+      return cb()
+    }
+
+    if (bufferLength - this.state.previousEnd < data.length) {
+      this._prev = data
+      this.state.previousEnd -= (bufferLength - data.length)
+      return cb()
+    }
+
+    this._prev = buffer
+    cb()
+  }
+}
+
+module.exports = (opts) => new CsvParser(opts)
diff --git a/node_modules/csv-parser/package.json b/node_modules/csv-parser/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..7e22065ac0f6fa5e502c30b076600853c4c2fdfe
--- /dev/null
+++ b/node_modules/csv-parser/package.json
@@ -0,0 +1,92 @@
+{
+  "name": "csv-parser",
+  "version": "3.0.0",
+  "description": "Streaming CSV parser that aims for maximum speed as well as compatibility with the csv-spectrum test suite",
+  "license": "MIT",
+  "repository": "mafintosh/csv-parser",
+  "author": "mafintosh",
+  "maintainers": [
+    "Andrew Powell <andrew@shellscape.org>"
+  ],
+  "homepage": "https://github.com/mafintosh/csv-parser",
+  "bugs": "https://github.com/mafintosh/csv-parser/issues",
+  "bin": {
+    "csv-parser": "./bin/csv-parser"
+  },
+  "main": "index.js",
+  "files": [
+    "bin/csv-parser",
+    "index.js",
+    "index.d.ts"
+  ],
+  "engines": {
+    "node": ">= 10"
+  },
+  "scripts": {
+    "bench": "bin/bench",
+    "commitlint": "commitlint",
+    "coverage": "nyc npm run test && nyc report --reporter=text-lcov > coverage.lcov",
+    "lint": "eslint .",
+    "lint-staged": "lint-staged",
+    "security": "npm audit",
+    "test": "ava && tsd"
+  },
+  "dependencies": {
+    "minimist": "^1.2.0"
+  },
+  "devDependencies": {
+    "@commitlint/cli": "^8.2.0",
+    "@commitlint/config-conventional": "^8.0.0",
+    "@types/node": "^12.0.0",
+    "ava": "^2.4.0",
+    "bops": "^1.0.0",
+    "chalk": "^2.4.2",
+    "concat-stream": "^2.0.0",
+    "csv-spectrum": "^1.0.0",
+    "eslint": "^6.4.0",
+    "eslint-config-standard": "^14.1.0",
+    "eslint-plugin-import": "^2.18.2",
+    "eslint-plugin-node": "^10.0.0",
+    "eslint-plugin-promise": "^4.1.1",
+    "eslint-plugin-standard": "^4.0.0",
+    "execa": "^2.1.0",
+    "globby": "^10.0.1",
+    "husky": "^3.0.0",
+    "lint-staged": "^9.0.2",
+    "loud-rejection": "^2.1.0",
+    "nyc": "^14.1.1",
+    "pre-commit": "^1.2.2",
+    "strip-ansi": "^5.2.0",
+    "text-table": "^0.2.0",
+    "time-span": "^3.1.0",
+    "tsd": "^0.8.0"
+  },
+  "directories": {
+    "example": "examples",
+    "test": "test"
+  },
+  "keywords": [
+    "csv",
+    "parser",
+    "fast",
+    "json"
+  ],
+  "ava": {
+    "files": [
+      "!**/fixtures/**",
+      "!**/helpers/**"
+    ]
+  },
+  "husky": {
+    "hooks": {
+      "commit-msg": "commitlint -e $HUSKY_GIT_PARAMS"
+    }
+  },
+  "lint-staged": {
+    "*.js": [
+      "eslint --fix",
+      "git add"
+    ]
+  },
+  "pre-commit": "lint-staged"
+}
diff --git a/package-lock.json b/package-lock.json
index f27972827b0b7473c9582a7f7690b5102e3a0309..91c5f9f31e4df6e2aca92bb5be856a52a61dd03b 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -13,6 +13,7 @@
         "bootstrap": "^5.2.3",
         "bootstrap-icons": "^1.10.3",
         "chart.js": "^4.2.1",
+        "csv-parser": "^3.0.0",
         "datatables.net": "^1.13.4",
         "datatables.net-dt": "^1.13.4",
         "electron-squirrel-startup": "^1.0.0",
@@ -2027,6 +2028,20 @@
         "node": ">=12.10"
       }
     },
+    "node_modules/csv-parser": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/csv-parser/-/csv-parser-3.0.0.tgz",
+      "integrity": "sha512-s6OYSXAK3IdKqYO33y09jhypG/bSDHPuyCme/IdEHfWpLf/jKcpitVFyOC6UemgGk8v7Q5u2XE0vvwmanxhGlQ==",
+      "dependencies": {
+        "minimist": "^1.2.0"
+      },
+      "bin": {
+        "csv-parser": "bin/csv-parser"
+      },
+      "engines": {
+        "node": ">= 10"
+      }
+    },
     "node_modules/datatables.net": {
       "version": "1.13.4",
       "resolved": "https://registry.npmjs.org/datatables.net/-/datatables.net-1.13.4.tgz",
@@ -4813,7 +4828,6 @@
       "version": "1.2.7",
       "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz",
       "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==",
-      "dev": true,
       "funding": {
         "url": "https://github.com/sponsors/ljharb"
       }
@@ -8497,6 +8511,14 @@
       "integrity": "sha512-MEzGfZo0rqE10O/B+AEcCSJLZsrWuRUvmqJTqHNqBtALhaJc3E3ixLGLJNTRzEA2K34wbmOHC4fwYs9sVsdcCA==",
       "dev": true
     },
+    "csv-parser": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/csv-parser/-/csv-parser-3.0.0.tgz",
+      "integrity": "sha512-s6OYSXAK3IdKqYO33y09jhypG/bSDHPuyCme/IdEHfWpLf/jKcpitVFyOC6UemgGk8v7Q5u2XE0vvwmanxhGlQ==",
+      "requires": {
+        "minimist": "^1.2.0"
+      }
+    },
     "datatables.net": {
       "version": "1.13.4",
       "resolved": "https://registry.npmjs.org/datatables.net/-/datatables.net-1.13.4.tgz",
@@ -10581,8 +10603,7 @@
     "minimist": {
       "version": "1.2.7",
       "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz",
-      "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==",
-      "dev": true
+      "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g=="
     },
     "minipass": {
       "version": "3.3.6",
diff --git a/package.json b/package.json
index a62ab129faefad472cf2e8afdde7ffe9ddae992f..6e12525196de4c256917fac1637852b670dedb8a 100644
--- a/package.json
+++ b/package.json
@@ -21,6 +21,7 @@
     "bootstrap": "^5.2.3",
     "bootstrap-icons": "^1.10.3",
     "chart.js": "^4.2.1",
+    "csv-parser": "^3.0.0",
     "datatables.net": "^1.13.4",
     "datatables.net-dt": "^1.13.4",
     "electron-squirrel-startup": "^1.0.0",
diff --git a/src/csv_upload.js b/src/csv_upload.js
new file mode 100644
index 0000000000000000000000000000000000000000..5fa28c40d6907989257ca51aebc46b696be48eab
--- /dev/null
+++ b/src/csv_upload.js
@@ -0,0 +1,135 @@
+const fs = require('fs');
+const csv = require('csv-parser');
+const sql = require('mssql');
+
+const config = {
+    server: process.env.DB_SERVER,
+    database: process.env.DB_NAME,
+    user: process.env.DB_USER,
+    password: process.env.DB_PASSWORD,
+    port: 1433,
+    options: {
+        encrypt: true
+    }
+};
+
+const uploadButton = document.querySelector('#csv_upload_btn');
+console.log(uploadButton);
+
+uploadButton.addEventListener('click', async (event) => {
+    event.preventDefault();
+
+    const csv_file = document.querySelector('#csv_file_upload').files[0];
+    const stream = fs.createReadStream(csv_file.path).pipe(csv());
+
+    console.log(stream);
+
+    const connection = await sql.connect(config);
+
+    const transaction = new sql.Transaction(connection);
+    await transaction.begin();
+
+    try {
+        for await (const row of stream) {
+            const request = new sql.Request(transaction);
+
+            request.input('encounterId', sql.Int, row.encounterId);
+            request.input('end_tidal_co2', sql.Int, row.end_tidal_co2 ? parseInt(row.end_tidal_co2) : null);
+            request.input('feed_vol', sql.Int, row.feed_vol ? parseInt(row.feed_vol) : null);
+            request.input('feed_vol_adm', sql.Int, row.feed_vol_adm ? parseInt(row.feed_vol_adm) : null);
+            request.input('fio2', sql.Int, row.fio2 ? parseInt(row.fio2) : null);
+            request.input('fio2_ratio', sql.Int, row.fio2_ratio ? parseInt(row.fio2_ratio) : null);
+            request.input('insp_time', sql.Int, row.insp_time ? parseInt(row.insp_time) : null);
+            request.input('oxygen_flow_rate', sql.Int, row.oxygen_flow_rate ? parseInt(row.oxygen_flow_rate) : null);
+            request.input('peep', sql.Int, row.peep ? parseInt(row.peep) : null);
+            request.input('pip', sql.Int, row.pip ? parseInt(row.pip) : null);
+            request.input('resp_rate', sql.Int, row.resp_rate ? parseInt(row.resp_rate) : null);
+            request.input('sip', sql.Int, row.sip ? parseInt(row.sip) : null);
+            request.input('tidal_vol', sql.Int, row.tidal_vol ? parseInt(row.tidal_vol) : null);
+            request.input('tidal_vol_actual', sql.Int, row.tidal_vol_actual ? parseInt(row.tidal_vol_actual) : null);
+            request.input('tidal_vol_kg', sql.Int, row.tidal_vol_kg ? parseInt(row.tidal_vol_kg) : null);
+            request.input('tidal_vol_spon', sql.Int, row.tidal_vol_spon ? parseInt(row.tidal_vol_spon) : null);
+            request.input('bmi', sql.Int, row.bmi ? parseInt(row.bmi) : null);
+            request.input('referral', sql.Int, row.referral ? parseInt(row.referral) : null);
+            console.log(request);
+            
+            //await request.query();
+        }
+        // const request = new sql.Request(transaction);
+        // await request.query("SET IDENTITY_INSERT fid_patients ON;\
+        // IF EXISTS (SELECT 1 FROM fid_patients WHERE patient_id = '1')\
+        // BEGIN\
+        //     UPDATE fid_patients\
+        //     SET patient_end_tidal_co2 = '1',\
+        //         patient_feed_vol = '1',\
+        //         patient_feed_vol_adm = '1',\
+        //         patient_fio2 = '1',\
+        //         patient_fio2_ratio = '1',\
+        //         patient_insp_time = '1',\
+        //         patient_oxygen_flow_rate = '1',\
+        //         patient_peep = '1',\
+        //         patient_pip = '1',\
+        //         patient_resp_rate = '1',\
+        //         patient_sip = '1',\
+        //         patient_tidal_vol = '1',\
+        //         patient_tidal_vol_actual = '1',\
+        //         patient_tidal_vol_kg = '1',\
+        //         patient_tidal_vol_spon = '1',\
+        //         patient_bmi = '1',\
+        //         patient_referral = '1',\
+        //         user_id = '1' \
+        //     WHERE patient_id = '1';\
+        // END\
+        // ELSE\
+        // BEGIN\
+        //     INSERT INTO fid_patients (patient_id,\
+        //                 patient_end_tidal_co2,\
+        //                 patient_feed_vol,\
+        //                 patient_feed_vol_adm,\
+        //                 patient_fio2,\
+        //                 patient_fio2_ratio,\
+        //                 patient_insp_time,\
+        //                 patient_oxygen_flow_rate,\
+        //                 patient_peep,\
+        //                 patient_pip,\
+        //                 patient_resp_rate,\
+        //                 patient_sip,\
+        //                 patient_tidal_vol,\
+        //                 patient_tidal_vol_actual,\
+        //                 patient_tidal_vol_kg,\
+        //                 patient_tidal_vol_spon,\
+        //                 patient_bmi,\
+        //                 patient_referral,\
+        //                 user_id)\
+        //     VALUES ('1',\
+        //             '2',\
+        //             '3',\
+        //             '4',\
+        //             '5',\
+        //             '6',\
+        //             '7',\
+        //             '8',\
+        //             '9',\
+        //             '10',\
+        //             '11',\
+        //             '12',\
+        //             '13',\
+        //             '14',\
+        //             '15',\
+        //             '16',\
+        //             '17',\
+        //             '18',\
+        //             '19');\
+        // END");
+
+        // await transaction.commit();
+        console.log('CSV rows parsed; database insert/commit is currently disabled');
+    }
+    catch (error) {
+        console.error('Error: ', error);
+        await transaction.rollback();
+    }
+    finally {
+        await connection.close();
+    }
+});
\ No newline at end of file
diff --git a/src/upload_csv.html b/src/upload_csv.html
index 428a7b667c5c0f33433f67ae363c1abac133a79b..6fee8531d2150c83bbd8be645c2c1d878fe86eb1 100644
--- a/src/upload_csv.html
+++ b/src/upload_csv.html
@@ -4,7 +4,7 @@
         <meta charset="UTF-8" />
         <title>FeedingInc</title>
         <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js" type="text/javascript"></script>
-  
+        
         <link rel="stylesheet" href="index.css" />
         <link rel="stylesheet" href="../node_modules/bootstrap/dist/css/bootstrap.min.css">
         <link rel="stylesheet" href="../node_modules/bootstrap-icons/font/bootstrap-icons.css">
@@ -83,7 +83,7 @@
     </body>
     <script>
         $('#csv_file_upload').change( function(){
-            const csv_file = document.getElementById('csv_file_upload').files[0]
+            const csv_file = document.getElementById('csv_file_upload').files[0];
     
             var res=$('#csv_file_upload').val();
             var arr = res.split("\\");
@@ -180,8 +180,6 @@
                     for (let i = 0; i < expectedHeaders.length; i++) {
                         if (headers[i].trim() !== expectedHeaders[i]) {
                             reject(new Error(`The header at position ${i+1} does not match the expected header.`));
-                            console.log(headers[i]);
-                            console.log(expectedHeaders[i]);
                         }
                     }
 
@@ -194,4 +192,6 @@
             });
         }
     </script>
+    <!-- External JavaScript to upload CSV file data to database -->
+    <script src="csv_upload.js"></script>
 </html>
\ No newline at end of file