diff --git a/UFCFVQ-15-M_Programming_Task_1_submit.ipynb b/UFCFVQ-15-M_Programming_Task_1_submit.ipynb
index 9d4d2e7392662aacc6c5d17cf070600db98f5175..c85264fc076355a4eeb78eda1c66fda1eb5c81d3 100644
--- a/UFCFVQ-15-M_Programming_Task_1_submit.ipynb
+++ b/UFCFVQ-15-M_Programming_Task_1_submit.ipynb
@@ -51,7 +51,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "metadata": {
     "deletable": false
    },
@@ -62,7 +62,7 @@
        "47.62"
       ]
      },
-     "execution_count": 2,
+     "execution_count": 1,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -81,7 +81,7 @@
     "    except ZeroDivisionError:\n",
     "        print(\"Error: Division by zero. List is empty\")\n",
     "    except TypeError:\n",
-    "        print(\"Error: Invalid typ in list. List must contain only numbers\")\n",
+    "        print(\"Error: Invalid type in list. List must contain only numbers\")\n",
     "    except:\n",
     "        print(\"Error with list of numbers. Please check list\")\n",
     "FR1_mean(list)\n"
@@ -108,7 +108,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 5,
    "metadata": {
     "deletable": false
    },
@@ -136,7 +136,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 27,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [
     {
@@ -176,7 +176,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -197,7 +197,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 29,
+   "execution_count": 11,
    "metadata": {},
    "outputs": [
     {
@@ -234,7 +234,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 12,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -278,28 +278,27 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 13,
    "metadata": {},
    "outputs": [
     {
-     "data": {
-      "text/plain": [
-       "0.8984458631125747"
-      ]
-     },
-     "execution_count": 7,
-     "metadata": {},
-     "output_type": "execute_result"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0.8984458631125747\n",
+      "[[1.         0.89844586]\n",
+      " [0.89844586 1.        ]]\n"
+     ]
     }
    ],
    "source": [
     "#testing FR4_pearsonCorrCoef function against numpy corrcoef\n",
     "x = [1, 2, 3, 5]\n",
     "y = [1, 5, 7, 8]\n",
-    "FR4_pearsonCorrCoef(x, y)\n",
+    "print(FR4_pearsonCorrCoef(x, y))\n",
     "\n",
     "import numpy as np\n",
-    "np.corrcoef(x, y)\n"
+    "print(np.corrcoef(x, y))\n"
    ]
   },
   {
@@ -323,7 +322,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 16,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -336,7 +335,7 @@
     "    my_dict = FR3_read_csv_into_dictionary(filename)\n",
     "\n",
     "    # Iterate through dictionary to calculate PCC for all combinations of variables, using FR4_pearsonCorrCoef function\n",
-    "    PCC_list_of_tuples = [(variable, variable2, FR4_pearsonCorrCoef(my_dict[variable], my_dict[variable2]))\n",
+    "    PCC_list_of_tuples = [(variable, variable2, round(FR4_pearsonCorrCoef(my_dict[variable], my_dict[variable2]), 5))\n",
     "           for variable in my_dict\n",
     "           for variable2 in my_dict]\n",
     "    #print(PCC_list_of_tuples)\n",
@@ -346,121 +345,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 18,
    "metadata": {},
    "outputs": [
     {
-     "data": {
-      "text/plain": [
-       "[('age', 'age', 1.0),\n",
-       " ('age', 'pop', -0.026709155288578715),\n",
-       " ('age', 'share_white', 0.19960545971348648),\n",
-       " ('age', 'share_black', -0.08806861565745706),\n",
-       " ('age', 'share_hispanic', -0.13679338429773677),\n",
-       " ('age', 'personal_income', 0.03247940547175454),\n",
-       " ('age', 'household_income', 0.07122884715312655),\n",
-       " ('age', 'poverty_rate', -0.11501578217612785),\n",
-       " ('age', 'unemployment_rate', -0.08924108157207414),\n",
-       " ('age', 'uni_education_25+\\n', -0.015551355328918883),\n",
-       " ('pop', 'age', -0.026709155288578715),\n",
-       " ('pop', 'pop', 1.0000000000000002),\n",
-       " ('pop', 'share_white', 0.0755124299211167),\n",
-       " ('pop', 'share_black', -0.15619530865683967),\n",
-       " ('pop', 'share_hispanic', 0.06195481228740331),\n",
-       " ('pop', 'personal_income', 0.20485917876690452),\n",
-       " ('pop', 'household_income', 0.3051738076744344),\n",
-       " ('pop', 'poverty_rate', -0.29132966650032943),\n",
-       " ('pop', 'unemployment_rate', -0.21783723363180305),\n",
-       " ('pop', 'uni_education_25+\\n', 0.11698059897943787),\n",
-       " ('share_white', 'age', 0.19960545971348648),\n",
-       " ('share_white', 'pop', 0.0755124299211167),\n",
-       " ('share_white', 'share_white', 1.0000000000000002),\n",
-       " ('share_white', 'share_black', -0.5449723033049567),\n",
-       " ('share_white', 'share_hispanic', -0.5774407139938217),\n",
-       " ('share_white', 'personal_income', 0.35839184199994806),\n",
-       " ('share_white', 'household_income', 0.32212297679344865),\n",
-       " ('share_white', 'poverty_rate', -0.49770809057624715),\n",
-       " ('share_white', 'unemployment_rate', -0.3896695027097979),\n",
-       " ('share_white', 'uni_education_25+\\n', 0.33416476681871427),\n",
-       " ('share_black', 'age', -0.08806861565745706),\n",
-       " ('share_black', 'pop', -0.15619530865683967),\n",
-       " ('share_black', 'share_white', -0.5449723033049567),\n",
-       " ('share_black', 'share_black', 1.0),\n",
-       " ('share_black', 'share_hispanic', -0.2624176266788398),\n",
-       " ('share_black', 'personal_income', -0.2824788256535901),\n",
-       " ('share_black', 'household_income', -0.34673961691580957),\n",
-       " ('share_black', 'poverty_rate', 0.4306656974717496),\n",
-       " ('share_black', 'unemployment_rate', 0.4836283024208505),\n",
-       " ('share_black', 'uni_education_25+\\n', -0.2129601024183306),\n",
-       " ('share_hispanic', 'age', -0.13679338429773677),\n",
-       " ('share_hispanic', 'pop', 0.06195481228740331),\n",
-       " ('share_hispanic', 'share_white', -0.5774407139938217),\n",
-       " ('share_hispanic', 'share_black', -0.2624176266788398),\n",
-       " ('share_hispanic', 'share_hispanic', 1.0000000000000002),\n",
-       " ('share_hispanic', 'personal_income', -0.2231256947246905),\n",
-       " ('share_hispanic', 'household_income', -0.13596088920701366),\n",
-       " ('share_hispanic', 'poverty_rate', 0.20829495353292043),\n",
-       " ('share_hispanic', 'unemployment_rate', 0.014748972805766968),\n",
-       " ('share_hispanic', 'uni_education_25+\\n', -0.2909783037426069),\n",
-       " ('personal_income', 'age', 0.03247940547175454),\n",
-       " ('personal_income', 'pop', 0.20485917876690452),\n",
-       " ('personal_income', 'share_white', 0.35839184199994806),\n",
-       " ('personal_income', 'share_black', -0.2824788256535901),\n",
-       " ('personal_income', 'share_hispanic', -0.2231256947246905),\n",
-       " ('personal_income', 'personal_income', 1.0),\n",
-       " ('personal_income', 'household_income', 0.8319631230491987),\n",
-       " ('personal_income', 'poverty_rate', -0.6959234082905974),\n",
-       " ('personal_income', 'unemployment_rate', -0.5049325127827728),\n",
-       " ('personal_income', 'uni_education_25+\\n', 0.7166080399373852),\n",
-       " ('household_income', 'age', 0.07122884715312655),\n",
-       " ('household_income', 'pop', 0.3051738076744344),\n",
-       " ('household_income', 'share_white', 0.32212297679344865),\n",
-       " ('household_income', 'share_black', -0.34673961691580957),\n",
-       " ('household_income', 'share_hispanic', -0.13596088920701366),\n",
-       " ('household_income', 'personal_income', 0.8319631230491987),\n",
-       " ('household_income', 'household_income', 1.0),\n",
-       " ('household_income', 'poverty_rate', -0.7541757449430393),\n",
-       " ('household_income', 'unemployment_rate', -0.5099954109970329),\n",
-       " ('household_income', 'uni_education_25+\\n', 0.6729008330623418),\n",
-       " ('poverty_rate', 'age', -0.11501578217612785),\n",
-       " ('poverty_rate', 'pop', -0.29132966650032943),\n",
-       " ('poverty_rate', 'share_white', -0.49770809057624715),\n",
-       " ('poverty_rate', 'share_black', 0.4306656974717496),\n",
-       " ('poverty_rate', 'share_hispanic', 0.20829495353292043),\n",
-       " ('poverty_rate', 'personal_income', -0.6959234082905974),\n",
-       " ('poverty_rate', 'household_income', -0.7541757449430393),\n",
-       " ('poverty_rate', 'poverty_rate', 1.0000000000000002),\n",
-       " ('poverty_rate', 'unemployment_rate', 0.5916868406520023),\n",
-       " ('poverty_rate', 'uni_education_25+\\n', -0.46033635796187616),\n",
-       " ('unemployment_rate', 'age', -0.08924108157207414),\n",
-       " ('unemployment_rate', 'pop', -0.21783723363180305),\n",
-       " ('unemployment_rate', 'share_white', -0.3896695027097979),\n",
-       " ('unemployment_rate', 'share_black', 0.4836283024208505),\n",
-       " ('unemployment_rate', 'share_hispanic', 0.014748972805766968),\n",
-       " ('unemployment_rate', 'personal_income', -0.5049325127827728),\n",
-       " ('unemployment_rate', 'household_income', -0.5099954109970329),\n",
-       " ('unemployment_rate', 'poverty_rate', 0.5916868406520023),\n",
-       " ('unemployment_rate', 'unemployment_rate', 1.0),\n",
-       " ('unemployment_rate', 'uni_education_25+\\n', -0.4663884373739078),\n",
-       " ('uni_education_25+\\n', 'age', -0.015551355328918883),\n",
-       " ('uni_education_25+\\n', 'pop', 0.11698059897943787),\n",
-       " ('uni_education_25+\\n', 'share_white', 0.33416476681871427),\n",
-       " ('uni_education_25+\\n', 'share_black', -0.2129601024183306),\n",
-       " ('uni_education_25+\\n', 'share_hispanic', -0.2909783037426069),\n",
-       " ('uni_education_25+\\n', 'personal_income', 0.7166080399373852),\n",
-       " ('uni_education_25+\\n', 'household_income', 0.6729008330623418),\n",
-       " ('uni_education_25+\\n', 'poverty_rate', -0.46033635796187616),\n",
-       " ('uni_education_25+\\n', 'unemployment_rate', -0.4663884373739078),\n",
-       " ('uni_education_25+\\n', 'uni_education_25+\\n', 1.0000000000000002)]"
-      ]
-     },
-     "execution_count": 11,
-     "metadata": {},
-     "output_type": "execute_result"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[('age', 'age', 1.0), ('age', 'pop', -0.02671), ('age', 'share_white', 0.19961), ('age', 'share_black', -0.08807), ('age', 'share_hispanic', -0.13679), ('age', 'personal_income', 0.03248), ('age', 'household_income', 0.07123), ('age', 'poverty_rate', -0.11502), ('age', 'unemployment_rate', -0.08924), ('age', 'uni_education_25+\\n', -0.01555), ('pop', 'age', -0.02671), ('pop', 'pop', 1.0), ('pop', 'share_white', 0.07551), ('pop', 'share_black', -0.1562), ('pop', 'share_hispanic', 0.06195), ('pop', 'personal_income', 0.20486), ('pop', 'household_income', 0.30517), ('pop', 'poverty_rate', -0.29133), ('pop', 'unemployment_rate', -0.21784), ('pop', 'uni_education_25+\\n', 0.11698), ('share_white', 'age', 0.19961), ('share_white', 'pop', 0.07551), ('share_white', 'share_white', 1.0), ('share_white', 'share_black', -0.54497), ('share_white', 'share_hispanic', -0.57744), ('share_white', 'personal_income', 0.35839), ('share_white', 'household_income', 0.32212), ('share_white', 'poverty_rate', -0.49771), ('share_white', 'unemployment_rate', -0.38967), ('share_white', 'uni_education_25+\\n', 0.33416), ('share_black', 'age', -0.08807), ('share_black', 'pop', -0.1562), ('share_black', 'share_white', -0.54497), ('share_black', 'share_black', 1.0), ('share_black', 'share_hispanic', -0.26242), ('share_black', 'personal_income', -0.28248), ('share_black', 'household_income', -0.34674), ('share_black', 'poverty_rate', 0.43067), ('share_black', 'unemployment_rate', 0.48363), ('share_black', 'uni_education_25+\\n', -0.21296), ('share_hispanic', 'age', -0.13679), ('share_hispanic', 'pop', 0.06195), ('share_hispanic', 'share_white', -0.57744), ('share_hispanic', 'share_black', -0.26242), ('share_hispanic', 'share_hispanic', 1.0), ('share_hispanic', 'personal_income', -0.22313), ('share_hispanic', 'household_income', -0.13596), ('share_hispanic', 'poverty_rate', 0.20829), ('share_hispanic', 'unemployment_rate', 0.01475), ('share_hispanic', 'uni_education_25+\\n', -0.29098), 
('personal_income', 'age', 0.03248), ('personal_income', 'pop', 0.20486), ('personal_income', 'share_white', 0.35839), ('personal_income', 'share_black', -0.28248), ('personal_income', 'share_hispanic', -0.22313), ('personal_income', 'personal_income', 1.0), ('personal_income', 'household_income', 0.83196), ('personal_income', 'poverty_rate', -0.69592), ('personal_income', 'unemployment_rate', -0.50493), ('personal_income', 'uni_education_25+\\n', 0.71661), ('household_income', 'age', 0.07123), ('household_income', 'pop', 0.30517), ('household_income', 'share_white', 0.32212), ('household_income', 'share_black', -0.34674), ('household_income', 'share_hispanic', -0.13596), ('household_income', 'personal_income', 0.83196), ('household_income', 'household_income', 1.0), ('household_income', 'poverty_rate', -0.75418), ('household_income', 'unemployment_rate', -0.51), ('household_income', 'uni_education_25+\\n', 0.6729), ('poverty_rate', 'age', -0.11502), ('poverty_rate', 'pop', -0.29133), ('poverty_rate', 'share_white', -0.49771), ('poverty_rate', 'share_black', 0.43067), ('poverty_rate', 'share_hispanic', 0.20829), ('poverty_rate', 'personal_income', -0.69592), ('poverty_rate', 'household_income', -0.75418), ('poverty_rate', 'poverty_rate', 1.0), ('poverty_rate', 'unemployment_rate', 0.59169), ('poverty_rate', 'uni_education_25+\\n', -0.46034), ('unemployment_rate', 'age', -0.08924), ('unemployment_rate', 'pop', -0.21784), ('unemployment_rate', 'share_white', -0.38967), ('unemployment_rate', 'share_black', 0.48363), ('unemployment_rate', 'share_hispanic', 0.01475), ('unemployment_rate', 'personal_income', -0.50493), ('unemployment_rate', 'household_income', -0.51), ('unemployment_rate', 'poverty_rate', 0.59169), ('unemployment_rate', 'unemployment_rate', 1.0), ('unemployment_rate', 'uni_education_25+\\n', -0.46639), ('uni_education_25+\\n', 'age', -0.01555), ('uni_education_25+\\n', 'pop', 0.11698), ('uni_education_25+\\n', 'share_white', 0.33416), 
('uni_education_25+\\n', 'share_black', -0.21296), ('uni_education_25+\\n', 'share_hispanic', -0.29098), ('uni_education_25+\\n', 'personal_income', 0.71661), ('uni_education_25+\\n', 'household_income', 0.6729), ('uni_education_25+\\n', 'poverty_rate', -0.46034), ('uni_education_25+\\n', 'unemployment_rate', -0.46639), ('uni_education_25+\\n', 'uni_education_25+\\n', 1.0)]\n"
+     ]
     }
    ],
    "source": [
-    "FR5_PCCs_from_csv('task1.csv')"
+    "FR5_output = FR5_PCCs_from_csv('task1.csv')\n",
+    "print(FR5_output)"
    ]
   },
   {
@@ -508,7 +406,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 251,
+   "execution_count": 20,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -528,7 +426,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 254,
+   "execution_count": 70,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -546,22 +444,22 @@
     "    row_headers = col_headers\n",
     "    \n",
     "    # calculate maximum column width in the data\n",
-    "    max_width = int(max_col_width(tup_list) * 3)\n",
+    "    max_width = int(max_col_width(tup_list) * 1.9)\n",
     "    \n",
     "    # create table string with top border based on padding character and maximum column width\n",
-    "    table_str = ' ' * max_width + pad_char * (max_width * (len(col_headers))) + pad_char * (len(col_headers)+1)+ '\\n' \n",
-    "    table_str += ' ' * max_width \n",
+    "    table_str =  ' ' * int(max_width/2) + pad_char * (max_width * (len(col_headers))) + pad_char * (len(col_headers)+1)+ '\\n' \n",
+    "    table_str += ' ' * int(max_width/2) \n",
     "\n",
     "    # add column headers to table string, using padding character and maximum column width\n",
     "    for col in col_headers:\n",
-    "        table_str += f\"{col:^{max_width+1}}\" \n",
+    "        table_str += f\"{col:^{(max_width)+1}}\" \n",
     "    \n",
     "    table_str += '\\n' \n",
-    "    table_str += ' ' * max_width + pad_char * (max_width * (len(col_headers))) +  pad_char * (len(col_headers)+1)+'\\n' \n",
+    "    table_str += ' ' * int(max_width/2) + pad_char * (max_width * (len(col_headers))) +  pad_char * (len(col_headers)+1)+'\\n' \n",
     "\n",
     "    # add row headers and values to table string, using padding character and maximum column width\n",
     "    for row in row_headers:\n",
-    "        table_str += f\"{row:^{max_width}}\"+pad_char \n",
+    "        table_str += f\"{row:<{int(max_width/2)}}\"+pad_char \n",
     "        \n",
     "        # Get the corresponding value (3rd element of tuple) for the current row and column; if no value, use '-'\n",
     "        for col in col_headers:            \n",
@@ -576,10 +474,10 @@
     "        table_str += '\\n' \n",
     "\n",
     "    # add bottom border to table string, using padding character and maximum column width    \n",
-    "    table_str += ' ' * max_width + pad_char * max_width * len(col_headers) + pad_char * (len(col_headers)+1)+  '\\n\\n' \n",
+    "    table_str += ' ' * int(max_width/2) + pad_char * max_width * len(col_headers) + pad_char * (len(col_headers)+1)+  '\\n\\n' \n",
     "    \n",
     "    # add caption for table\n",
-    "    table_str += ' ' * max_width + \"Pearson's Correlation Coefficient for %s\" % (col_headers,)\n",
+    "    table_str += ' ' * int(max_width/2) + \"Pearson's Correlation Coefficient for %s\" % (col_headers,)\n",
     "\n",
     "    # print table string\n",
     "    print(table_str)\n"
@@ -587,27 +485,27 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 255,
+   "execution_count": 71,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "                     +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n",
-      "                              Age                   BMI                    BP          \n",
-      "                     +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n",
-      "         Age         +          1          +       -0.3847       +        0.4194       +\n",
-      "         BMI         +       -0.3847       +          1          +       -0.4522       +\n",
-      "         BP          +        0.4194       +       -0.4522       +          1          +\n",
-      "                     +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n",
+      "                 ----------------------------------------------------------------------------------------------------------\n",
+      "                                 age                           poverty_rate                     household_income          \n",
+      "                 ----------------------------------------------------------------------------------------------------------\n",
+      "age              -                1.0               -             -0.11502             -              0.07123             -\n",
+      "poverty_rate     -             -0.11502             -                1.0               -             -0.75418             -\n",
+      "household_income -              0.07123             -             -0.75418             -                1.0               -\n",
+      "                 ----------------------------------------------------------------------------------------------------------\n",
       "\n",
-      "                     Pearson's Correlation Coefficient for ('Age', 'BMI', 'BP')\n"
+      "                 Pearson's Correlation Coefficient for ('age', 'poverty_rate', 'household_income')\n"
      ]
     }
    ],
    "source": [
-    "FR6_print_table(tup_list,  'Age', 'BMI', 'BP', pad_char = '+')\n"
+    "FR6_print_table(FR5_output, 'age', 'poverty_rate', 'household_income', pad_char = '-')\n"
    ]
   },
   {
@@ -640,6 +538,106 @@
     "# Process Development Report for Task 1\n"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "•\tYou are expected to identify the strengths/weaknesses of your approach. For this programming task, you are expected to write a reflective report which focuses on the process taken to develop a solution to the task. Please reflect on your experiences rather than simply describing what you did. The report should: \n",
+    "o\tinclude an explanation of how you approached the task.\n",
+    "o\tidentify any strengths/weaknesses of the approach used.\n",
+    "o\tconsider how the approach used could be improved.\n",
+    "o\tsuggest alternative approaches that could have been taken instead of the one you used.\n",
+    "\n",
+    "o\tup to 8 marks for the Development Process Report\n",
+    "\tMarks will be awarded for appropriate use of technical language, critical reflection on development process and quality of engagement with the reflective process\n",
+    "\n",
+    "word count - 500 max\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "FR1: arithmetic mean\n",
+    "\n",
+    "* easy to implement - good confidence boost\n",
+    "* allows for getting into a flow of coding, including comments, docstrings, naming conventions (variables and functions), testing, etc.\n",
+    "* I kept returning to the naming and commenting of early requirements - as they developed.  \n",
+    "* Challenge was getting the balance right - on the one hand, I wanted to ensure that I was including adequate comments but on the other hand, I did not want to over document.  Bearing in mind that this is an assessed element of coursework submitted for a master's module, I think I have the right balance.  In 'the real world,' I would probably shorten some of the docstrings and remove some in line comments.  \n",
+    "* My tactic has been to try to name variables and functions in an unambiguous, meaningful manner so that documentation is unnecessary.\n",
+    "* I haven't settled on a style yet - e.g. casing (proper, camel, etc.) and use of underscores.  \n",
+    "* When writing functions, I wanted them to have reuse value - that is, to keep them general.  With some requirements, this was not possible - some of my ideas would have deviated from the brief.  Examples later.  \n",
+    "* I wanted to use helpful python code where relevant, e.g. try/except blocks with specific (anticipated) errors captured.  \n",
+    "\n",
+    "* What I would do differently:\n",
+    "  * I need to be mindful of using restricted/reserved words as variable names.  I have used 'list' in FR1 function.  This is not a problem in this instance but it could be in others.  I will need to be more careful in future.\n",
+    "  * I also would like to assess the efficiency of my functions - there are so many ways to write functions producing the same outcomes in python.  Absolutely amazed at how flexible and powerful it is - but at the moment, I am pleased when I can get my code to work.  Efficiency, elegance, etc. will come with time and experience - but I could start using time to assess processing times for different approaches. \n",
+    "\n",
+    "FR2: read single column from CSV file\n",
+    "\n",
+    "* Again, I wanted to make a reusable function, so I added 'delimiter' as an optional parameter.  I prefer to use pipes (|) as delimiters as they are very seldom used in text.  This is something I have found handy in data migration work I have undertaken in my current job.  Quite often - common characters like commas and 'invisible' spacing characters like tabs, new line, carriage returns, etc. can wreak havoc when migrating data.  \n",
+    "* I liked this function as it was a good opportunity to use the 'with' statement.  I like that it 'closes' the file when finished, without a need for an explicit close statement.  \n",
+    "* The challenge for me in this requirement was working out how to extract the header row and then proceed to the data rows.  \n",
+    "* It was a good example of where I knew what I wanted to do and it seemed simple enough, but took some trial and error before I got there (plenty more to come!)\n",
+    "* Also added a specific FileNotFoundError exception as I expect this to be the most common error.\n",
+    "* I also added an if/else statement to account for whether the file has a header - returning results accordingly.  While this is not in the brief, I was thinking about a future-proofed function which can handle both varieties.\n",
+    "\n",
+    "FR3: read CSV data from a file into memory\n",
+    "\n",
+    "* Much of the above applies - the function docstring is probably a bit long but I wanted to be clear and complete in my documentation.\n",
+    "* This function builds on FR2 - but returns a dictionary by iterating through the data rows of the file.  \n",
+    "* I worked through many versions of this - for loops, enumerate, etc. - but settled for a list comprehension, once I get it working the way it should.  I am not sure if it is the most efficient way to do it, but it works.  However, I like the conciseness of list comprehensions - although they may not be as readable as a for loop.\n",
+    "* In terms of variable names, I am not always clear on the standards - i, j, row, line, etc. and I vary between them - which annoys me.  Consistency, clarity, transparency, etc. are very important to me. I am not so happy with 'variable_data_dict' as a name, but couldn't find anything better - 'my_dict', 'data_dict', etc. - I am critical of them all. \n",
+    "\n",
+    "\n",
+    "* one problem I had with FR3 was discovered when using the function in various of FR4.  The original dictionary produced string key-value pairs - which when printing looked fine to me.  It took me some time to realise that the values were string - that they were enclosed in single quotation marks.  This was certainly a source of frustration - when something simply was not behaving the way I was anticipating.\n",
+    "  * However, once I noticed, I tried to amend the list comprehension by converting to integers with int() - which caused an error - \"invalid literal for int() with base 10: '60.5'\"\n",
+    "  * Therefore I converted the data to floats - which may not be quite right for ages, but will not impact the calculations.\n",
+    "* \n",
+    "\n",
+    "FR4: Pearson Correlation Coefficient\n",
+    "\n",
+    "* I spent a lot of time on this function and tried to make it work using several methodologies.  \n",
+    "* Pseudo code was particularly helpful in this instance, but I have a tendency of starting with pseudo code and then deviating into a confused space of trial and error rather than updating the pseudo code.  Definitely something I can work on and take away into my current day job. \n",
+    "* Firstly I needed to remind myself how to calculate Pearson's r having only ever had it calculated for me in SPSS or R.  There are a couple of approaches - but the strategy is to break down the formula into standalone calculations and build the function to produce these before putting it all together to return R.  \n",
+    "* I spent a lot of time trying different approaches.  Again, like last time I started with for loops and enumerate() but found that I couldn't always achieve my intentions - that I would get slightly lost in the hierarchy and multilined approach, so I used list comprehensions instead and in the end, I am pleased with the result.\n",
+    "\n",
+    "* As previously, I tried thinking about generalising and future-proofing the function for future use and that includes considering what tests / checks / validation I could put in place.  The most obvious source of error will be the csv file so I added some assertions to ensure that the csv file has the correct data before proceeding to calculate Pearson's r.  \n",
+    "* I tested my version of Pearsons with numpy corrcoef() and it produces the same results for a small data set, which gives you that excitement of having achieved your intentions.\n",
+    "\n",
+    "\n",
+    "FR5: Pearson Correlation Coefficients for a file\n",
+    "\n",
+    "* This function builds on FR4 by calculating Pearson's r for each pair of columns in a csv file.\n",
+    "* I enjoyed using FR3 and FR4 (which in turn use FR1) within this function.  That is pretty cool.\n",
+    "* It does not have many lines of code but it took me a while to get it working.  I had to think about how to iterate through the columns of the csv in terms of what has been returned by FR3 and FR4 and then getting the column names into a list of tuples.\n",
+    "* It was not plain sailing and there was a fair bit of frustration - another good example of where I knew exactly what I wanted to do but it was hard to get there.  I had a few versions on the go - I think I could have benefited by sticking with one and updating the pseudo code.  \n",
+    "* As previously, I moved from for loops to list comprehensions as I had better success getting the results I wanted, especially returning lists of tuples.  Initially, I was returning individual lists instead of lists of tuples.  \n",
+    "* The joy of success after some struggle can be very satisfying. \n",
+    "\n",
+    "I would probably look to round some of the output in future iterations.\n",
+    "Actually - I just added the rounding now!\n",
+    "\n",
+    "And finally\n",
+    "\n",
+    "FR6: printing the results\n",
+    "\n",
+    "* I found this FR very challenging and a bit frustrating.\n",
+    "* I could have benefitted significantly by sticking with my pseudo code and keeping it up to date - but once I got stuck into a cycle of trial and error, minor success and subsequent set-back, I did not revert to pseudocode and focused on cracking the issue which was hampering me.  \n",
+    "* This function was created through trial and error - entirely\n",
+    "* I broke it down into separate sections and tried to think how I would iterate through the list of tuples to get a decent end result.\n",
+    "* Some sections worked, others didn't, changes then impacted the rest of the function and I was back to square one - hardforking!\n",
+    "* Towards the end, I had functioning portions - the top table header, the column headers, the rows headers, the r-values.  This was a mess of for loops, list comprehensions, and string formatting which I needed to work through step by step.\n",
+    "* I found cells in Jupyter notebooks to be invaluable - the ability to run portions of code and easily move it around is very effective and powerful.  I also found the ability to comment out sections of code very useful - I could easily comment out sections of code and then uncomment them to see how they impacted the rest of the function.\n",
+    "* Finally, the pieces (rows of code) were lining up - at this point, I resorted to tweaking and assessing the impact of each change.  I discovered that I needed data specific to the input table - rather than fixed widths - so created a separate function to calculate max column widths and then played with that to get the table to look passable.  \n",
+    "\n",
+    "Whew - got there in the end!  \n",
+    "\n",
+    "Although this was more challenging to get motivated over as I am sure I would use some library to print tables without the effort and challenge - i.e. pandas, prettytable, etc.  However, in hindsight I am glad I persevered as I have learned a lot about string formatting and how to iterate through lists of tuples.  It allowed me to trouble-shoot, iterate through a requirement, reassess and rework through many cycles.  I am sure I will use this knowledge in the future.\n"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},