From aff3dd51836443be9439978033b7068b700fdf69 Mon Sep 17 00:00:00 2001 From: George Mihaila Date: Sun, 10 Jan 2021 11:36:44 -0600 Subject: [PATCH] Created using Colaboratory --- notebooks/pytorch/bert_inner_workings.ipynb | 6225 +++++++++++++++++++ 1 file changed, 6225 insertions(+) create mode 100644 notebooks/pytorch/bert_inner_workings.ipynb diff --git a/notebooks/pytorch/bert_inner_workings.ipynb b/notebooks/pytorch/bert_inner_workings.ipynb new file mode 100644 index 0000000..5ba5df4 --- /dev/null +++ b/notebooks/pytorch/bert_inner_workings.ipynb @@ -0,0 +1,6225 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "bert_inner_workings.ipynb", + "provenance": [], + "collapsed_sections": [ + "_U3YUUYXtTgt" + ], + "toc_visible": true, + "authorship_tag": "ABX9TyNC96mi6kEBq9Pvz2NBrcIz" + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "f5271401e1d1483483d9c875024d20db": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "state": { + "_view_name": "HBoxView", + "_dom_classes": [], + "_model_name": "HBoxModel", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.5.0", + "box_style": "", + "layout": "IPY_MODEL_d52b7b5656664890b2e7ab4bafad685f", + "_model_module": "@jupyter-widgets/controls", + "children": [ + "IPY_MODEL_e2f2cd84adf941bf81be7190558fe72b", + "IPY_MODEL_45434500a7af43daa63677780eaaf499" + ] + } + }, + "d52b7b5656664890b2e7ab4bafad685f": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "e2f2cd84adf941bf81be7190558fe72b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "state": { + "_view_name": "ProgressView", + "style": "IPY_MODEL_9fd9205ffd0f4e2197cda4a0e9176ef9", + "_dom_classes": [], + "description": "Downloading: 100%", + "_model_name": "FloatProgressModel", + "bar_style": "success", + "max": 213450, + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": 213450, + "_view_count": null, + "_view_module_version": "1.5.0", + "orientation": "horizontal", + "min": 0, + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_5e953d2204994a50aa413ddd6af946fe" + } + }, + "45434500a7af43daa63677780eaaf499": { + "model_module": 
"@jupyter-widgets/controls", + "model_name": "HTMLModel", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_1cc50c5a31da4aefb8ad9f90f6b2c2da", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": " 213k/213k [00:00<00:00, 278kB/s]", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_6fec0f4e4d2440ddaa891706851670f5" + } + }, + "9fd9205ffd0f4e2197cda4a0e9176ef9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "ProgressStyleModel", + "description_width": "initial", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "bar_color": null, + "_model_module": "@jupyter-widgets/controls" + } + }, + "5e953d2204994a50aa413ddd6af946fe": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "1cc50c5a31da4aefb8ad9f90f6b2c2da": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "DescriptionStyleModel", + "description_width": "", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "_model_module": "@jupyter-widgets/controls" + } + }, + "6fec0f4e4d2440ddaa891706851670f5": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": 
null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "03f8e141123746809a453c853709a9f7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "state": { + "_view_name": "HBoxView", + "_dom_classes": [], + "_model_name": "HBoxModel", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.5.0", + "box_style": "", + "layout": "IPY_MODEL_b9aa8f43278545a9b13f96b258f3954b", + "_model_module": "@jupyter-widgets/controls", + "children": [ + "IPY_MODEL_048e95840a9648d2b5712c71e41e80c9", + "IPY_MODEL_f01d3bc2c1a64af6b380694617e05258" + ] + } + }, + "b9aa8f43278545a9b13f96b258f3954b": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "048e95840a9648d2b5712c71e41e80c9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "state": { + "_view_name": "ProgressView", + "style": "IPY_MODEL_5ee1343e0de044d18b1a0c14514f9db0", + "_dom_classes": [], + "description": "Downloading: 100%", + "_model_name": "FloatProgressModel", + "bar_style": "success", + "max": 433, + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": 433, + "_view_count": null, + "_view_module_version": "1.5.0", + "orientation": "horizontal", + "min": 0, + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_ece9c5791d6144eb99b784e98ed89b09" + } + }, + "f01d3bc2c1a64af6b380694617e05258": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_485140f2da9d4e6aa8f31dbb7c359c96", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": " 433/433 [00:01<00:00, 287B/s]", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_9c112aae348b48feac199547b10b4005" + } + }, + "5ee1343e0de044d18b1a0c14514f9db0": { + "model_module": 
"@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "ProgressStyleModel", + "description_width": "initial", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "bar_color": null, + "_model_module": "@jupyter-widgets/controls" + } + }, + "ece9c5791d6144eb99b784e98ed89b09": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "485140f2da9d4e6aa8f31dbb7c359c96": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "DescriptionStyleModel", + "description_width": "", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "_model_module": "@jupyter-widgets/controls" + } + }, + "9c112aae348b48feac199547b10b4005": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "61c7d801e1fd414eb0f8513aca4fdf36": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "state": { + "_view_name": "HBoxView", + "_dom_classes": [], + "_model_name": "HBoxModel", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_view_count": 
null, + "_view_module_version": "1.5.0", + "box_style": "", + "layout": "IPY_MODEL_c6479a8bd482411cbb23e25cae993d34", + "_model_module": "@jupyter-widgets/controls", + "children": [ + "IPY_MODEL_612ca63a15fc4bcb974eb0cda7c8e12b", + "IPY_MODEL_3dc98afd608c44479dd3b1f64d1c4d7a" + ] + } + }, + "c6479a8bd482411cbb23e25cae993d34": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "612ca63a15fc4bcb974eb0cda7c8e12b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "state": { + "_view_name": "ProgressView", + "style": "IPY_MODEL_8a57ad23e56844a4924c65adfae973bd", + "_dom_classes": [], + "description": "Downloading: 100%", + "_model_name": "FloatProgressModel", + "bar_style": "success", + "max": 435779157, + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": 435779157, + "_view_count": null, + "_view_module_version": "1.5.0", + "orientation": "horizontal", + "min": 0, + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_73ec4b91421c4aa8908260666a7d4373" + } + }, + "3dc98afd608c44479dd3b1f64d1c4d7a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_f853af8ab5f645d49af43a4b138d6db2", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": " 436M/436M [00:07<00:00, 61.3MB/s]", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_9666227b0e58497e972bb4d03624e0b4" + } + }, + "8a57ad23e56844a4924c65adfae973bd": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "ProgressStyleModel", + "description_width": "initial", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "bar_color": null, + "_model_module": "@jupyter-widgets/controls" + } + }, + "73ec4b91421c4aa8908260666a7d4373": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + 
"_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "f853af8ab5f645d49af43a4b138d6db2": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "DescriptionStyleModel", + "description_width": "", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "_model_module": "@jupyter-widgets/controls" + } + }, + "9666227b0e58497e972bb4d03624e0b4": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + } + } + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "_U3YUUYXtTgt" + }, + "source": [ + "##### © Copyright 2020 [George Mihaila](https://github.com/gmihaila).\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3NmlcqdctXlB" + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing 
permissions and\n", + "# limitations under the License." + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6oaNu3HJtTTo" + }, + "source": [ + "# **⚙️ Bert Inner Workings**\n", + "\n", + "## **Let's look at how an input flows through Bert.**\n", + "\n", + "\n", + "
\n", + "\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/gmihaila/ml_things/blob/master/notebooks/pytorch/gpt2_finetune_classification.ipynb)  \n", + "[![Generic badge](https://img.shields.io/badge/GitHub-Source-greensvg)](https://github.com/gmihaila/ml_things/blob/master/notebooks/pytorch/gpt2_finetune_classification.ipynb)\n", + "[![Generic badge](https://img.shields.io/badge/Article-Medium-black.svg)](https://gmihaila.medium.com/gpt2-for-text-classification-using-hugging-face-transformers-574555451832)\n", + "[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\n", + "\n", + "\n", + "
\n", + "\n", + "**Disclaimer:** *The format of this tutorial notebook is very similar to my other tutorial notebooks. This is done intentionally in order to keep readers familiar with my format.*\n", + "\n", + "
\n", + "\n", + "**Main idea:**\n", + "\n", + "I created this notebook to better understand the inner workings of Bert. I followed a lot of tutorials to try to understand the architecture, but I was never able to really understand what was happening under the hood. For me it always helps to see the actual code instead of just simple abstract diagrams that a lot of times don't match the actual implementation. If you're like me than this tutorial will help!\n", + "\n", + "I went as deep as you can go with Deep Learning - all the way to the tensor level. For me it helps to see the code and how the tensors move between layers. I feel like this level of abstraction is close enough to the core of the model to perfectly understand the inner workings.\n", + "\n", + "I will use the implementation of Bert from one of the best NLP library out there - [HuggingFace](https://huggingface.co) [Transformers](https://github.com/huggingface/transformers). More specifically, I will show the inner working of [Bert For Sequence Classification](https://huggingface.co/transformers/model_doc/bert.html#bertforsequenceclassification).\n", + "\n", + "The term **forward pass** is used in Neural Networks and it refers to the calculations involved from the input sequence all the way to output of the last layer. It's basically the flow of data from input to output.\n", + "\n", + "I will follow the code from an example input sequence all the way to the final output prediction.\n", + "\n", + "
\n", + "\n", + "## **What should I know for this notebook?**\n", + "\n", + "Some prior knowledge of Bert is needed. I won't go into any details of how Bert works. For this there is plenty of information out there.\n", + "\n", + "Since I am using the PyTorch implementation of Bert any knowledge on PyTorch is very useful.\n", + "\n", + "Knowing a little bit about the [transformers](https://github.com/huggingface/transformers) library helps too.\n", + "\n", + "\n", + "
\n", + "\n", + "## **How deep are we going?**\n", + "\n", + "I think the best way to understand such a complex model as Bert is to see the actual layer components that are used. I will dig in the code until I see the actual PyTorch layers used `torch.nn`. In my opinion there is no need to go deeper than the `torch.nn` layers. \n", + "\n", + "
\n", + "\n", + "## **Tutorial Structure**\n", + "\n", + "Each section contains multiple subsections. \n", + "\n", + "The order of each section matches the order of the model's layers from input to output.\n", + "\n", + "At the beginning of each section of code I created a diagram to illustrate the flow of tensors of that particular code.\n", + "\n", + "I created the diagrams following the model's implementation. \n", + "\n", + "The major section **Bert For Sequence Classification** starts with the **Class Call** that shows how we normally create the Bert model for sequence classification and perform a forward pass. **Class Components** contains the components of `BertForSequenceClassification` implementation.\n", + "\n", + "At the end of each major section, I assemble all components from that section and show the output and diagram.\n", + "\n", + "At the end of the notebook, I have all the code parts and diagrams assembled. \n", + "\n", + "\n", + "
\n", + "\n", + "## **Terminology**\n", + "\n", + "I will use regular deep learning terminology found in most Bert tutorials. I'm using some terms in a slightly different way:\n", + "\n", + "* **Layer** and **layers**: In this tutorial when I mention layer it can be an abstraction of a group of layers or just a single layer. When I reach `torch.nn` you know I refer to a single layer.\n", + "\n", + "* `torch.nn`: I'm referring to any PyTorch layer module. This is the deepest I will go in this tutorial.\n", + "\n", + "
\n", + "\n", + "## **How to use this notebook?**\n", + "\n", + "The purpose of this notebook is purely educational. This notebook is to be used to align known information on how Bert woks with the code implementation of Bert. I used the Bert implementation from [Transformers](https://github.com/huggingface/transformers). My contribution is on arranging the code implementation and creating associated diagrams.\n", + "\n", + "
\n", + "\n", + "## **Dataset**\n", + "\n", + "For simplicity I will only use two sentences as our data input: `I love cats!` and `He hates pineapple pizza.`. I'll pretend to do binary sentiment classification on these two sentences.\n", + "\n", + "
\n", + "\n", + "## **Coding**\n", + "\n", + "Now let's do some coding! We will go through each coding cell in the notebook and describe what it does, what's the code, and when is relevant - show the output.\n", + "\n", + "I made this format to be easy to follow if you decide to run each code cell in your own python notebook.\n", + "\n", + "When I learn from a tutorial, I always try to replicate the results. I believe it's easy to follow along if you have the code next to the explanations.\n", + "\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0xFqG3VVtxpi" + }, + "source": [ + "## **Installs**\n", + "\n", + "* **[transformers](https://github.com/huggingface/transformers)** library needs to be installed to use all the awesome code from Hugging Face. To get the latest version I will install it straight from GitHub.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MUz_vmR8tyoc", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "92b4a857-d9a6-4ac7-9847-dab4c708e279" + }, + "source": [ + "# install the transformers library\n", + "!pip install -q git+https://github.com/huggingface/transformers.git" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", + " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", + " Preparing wheel metadata ... \u001b[?25l\u001b[?25hdone\n", + "\u001b[K |████████████████████████████████| 890kB 5.7MB/s \n", + "\u001b[K |████████████████████████████████| 2.9MB 34.1MB/s \n", + "\u001b[?25h Building wheel for transformers (PEP 517) ... \u001b[?25l\u001b[?25hdone\n", + " Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vF3UbENTtz2_" + }, + "source": [ + "## **Imports**\n", + "\n", + "Import all needed libraries for this notebook.\n", + "\n", + "Declare parameters used for this notebook:\n", + "\n", + "* `set_seed(123)` - Always good to set a fixed seed for reproducibility.\n", + "* `n_labels` - How many labels are we using in this dataset. This is used to decide size of classification head.\n", + "* `ACT2FN` - Dictionary for special activation functions used in Bert. We'll only need the `gelu` activation function.\n", + "* `BertLayerNorm` - Shortcut for calling the PyTorch normalization layer `torch.nn.LayerNorm`.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gjr_J342tOPq" + }, + "source": [ + "import math\n", + "import torch\n", + "from transformers.activations import gelu\n", + "from transformers import (BertTokenizer, BertConfig, \n", + " BertForSequenceClassification, BertPreTrainedModel, \n", + " apply_chunking_to_forward, set_seed,\n", + " )\n", + "from transformers.modeling_outputs import (BaseModelOutputWithPastAndCrossAttentions, \n", + " BaseModelOutputWithPoolingAndCrossAttentions, \n", + " SequenceClassifierOutput,\n", + " )\n", + "\n", + "\n", + "# Set seed for reproducibility.\n", + "set_seed(123)\n", + "\n", + "# How many labels are we using in training.\n", + "# This is used to decide size of classification head.\n", + "n_labels = 2\n", + "\n", + "# GELU Activation function.\n", + "ACT2FN = {\"gelu\": gelu}\n", + "\n", + "# Define BertLayerNorm.\n", + "BertLayerNorm = torch.nn.LayerNorm" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jKjfPzSEknRi" + }, + "source": [ + "## **Define Input**\n", + "\n", + "Let's define some text data on which we will use Bert to classify as positive or negative.\n", + "\n", + "We encoded our positive and negative sentiments into:\n", + "* 0 - for negative sentiments.\n", + "* 1 - for positive sentiments." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "EDEubgJIt23C" + }, + "source": [ + "# list of texts we want to classify\n", + "input_texts = ['I love cats!',\n", + " \"He hates pineapple pizza.\"]\n", + "\n", + "# sentiment labels\n", + "labels = [1, 0]" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CSDrYs1klyO2" + }, + "source": [ + "## **Bert Tokenizer**\n", + "\n", + "Creating the `tokenizer` is pretty standard when using the Transformers library.\n", + "\n", + "We'll use our newly created `tokenizer` on our two-sentence dataset to create the `input_sequence` that will be used as input for our Bert model.\n", + "\n", + "\n", + "
\n", + "Show Bert Tokenizer Diagram\n", + "\n", + "\n", + "![bert_inner_workings_tokenizer](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_tokenizer.png)\n", + "\n", + "
\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "se4LMOBYl0TP", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 389, + "referenced_widgets": [ + "f5271401e1d1483483d9c875024d20db", + "d52b7b5656664890b2e7ab4bafad685f", + "e2f2cd84adf941bf81be7190558fe72b", + "45434500a7af43daa63677780eaaf499", + "9fd9205ffd0f4e2197cda4a0e9176ef9", + "5e953d2204994a50aa413ddd6af946fe", + "1cc50c5a31da4aefb8ad9f90f6b2c2da", + "6fec0f4e4d2440ddaa891706851670f5" + ] + }, + "outputId": "521f1283-d7f3-4151-cb56-fd09c564b939" + }, + "source": [ + "# Create BertTokenizer.\n", + "tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n", + "\n", + "# Create input sequence using tokenizer.\n", + "input_sequences = tokenizer(text=input_texts, add_special_tokens=True, padding=True, truncation=True, return_tensors='pt')\n", + "\n", + "# Since input_sequence is a dictionary we can also add the labels to it\n", + "# want to make sure all values ar tensors.\n", + "input_sequences.update({'labels':torch.tensor(labels)})\n", + "\n", + "# The tokenizer will return a dictionary of three: input_ids, attention_mask and token_type_ids.\n", + "# Let's do a pretty print.\n", + "print('PRETTY PRINT OF `input_sequences` UPDATED WITH `labels`:')\n", + "[print('%s : %s\\n'%(k,v)) for k,v in input_sequences.items()];\n", + "\n", + "# Lets see how the text looks like after Bert Tokenizer.\n", + "# We see the special tokens added.\n", + "print('ORIGINAL TEXT:')\n", + "[print(example) for example in input_texts];\n", + "print('\\nTEXT AFTER USING `BertTokenizer`:')\n", + "[print(tokenizer.decode(example)) for example in input_sequences['input_ids'].numpy()];" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "f5271401e1d1483483d9c875024d20db", + "version_minor": 0, + "version_major": 2 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=213450.0, style=ProgressStyle(descripti…" + ] + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "\n", + "PRETTY PRINT OF `input_sequences` UPDATED WITH `labels`:\n", + "input_ids : tensor([[ 101, 146, 1567, 11771, 106, 102, 0, 0, 0],\n", + " [ 101, 1124, 18457, 10194, 11478, 7136, 13473, 119, 102]])\n", + "\n", + "token_type_ids : tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n", + " [0, 0, 0, 0, 0, 0, 0, 0, 0]])\n", + "\n", + "attention_mask : tensor([[1, 1, 1, 1, 1, 1, 0, 0, 0],\n", + " [1, 1, 1, 1, 1, 1, 1, 1, 1]])\n", + "\n", + "labels : tensor([1, 0])\n", + "\n", + "ORIGINAL TEXT:\n", + "I love cats!\n", + "He hates pineapple pizza.\n", + "\n", + "TEXT AFTER USING `BertTokenizer`:\n", + "[CLS] I love cats! [SEP] [PAD] [PAD] [PAD]\n", + "[CLS] He hates pineapple pizza. [SEP]\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nCXokOGPqRKo" + }, + "source": [ + "## **Bert Configuration**\n", + "\n", + "\n", + "Predefined values specific to Bert architecture already defined for us by Hugging Face." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "p09u6BO4qSpJ", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 117, + "referenced_widgets": [ + "03f8e141123746809a453c853709a9f7", + "b9aa8f43278545a9b13f96b258f3954b", + "048e95840a9648d2b5712c71e41e80c9", + "f01d3bc2c1a64af6b380694617e05258", + "5ee1343e0de044d18b1a0c14514f9db0", + "ece9c5791d6144eb99b784e98ed89b09", + "485140f2da9d4e6aa8f31dbb7c359c96", + "9c112aae348b48feac199547b10b4005" + ] + }, + "outputId": "83bdfe22-a6dd-4e33-993d-e798e2c9a047" + }, + "source": [ + "# Create the Bert configuration.\n", + "bert_configuraiton = BertConfig.from_pretrained('bert-base-cased')\n", + "\n", + "# Let's see the number of layers.\n", + "print('NUMBER OF LAYERS:', bert_configuraiton.num_hidden_layers)\n", + "\n", + "# We can also see the size of embeddings inside Bert.\n", + "print('EMBEDDING SIZE:', bert_configuraiton.hidden_size)\n", + "\n", + "# See which activation function is used in hidden layers.\n", + "print('ACTIVATIONS:', bert_configuraiton.hidden_act)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "03f8e141123746809a453c853709a9f7", + "version_minor": 0, + "version_major": 2 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=433.0, style=ProgressStyle(description_…" + ] + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "\n", + "NUMBER OF LAYERS: 12\n", + "EMBEDDING SIZE: 768\n", + "ACTIVATIONS: gelu\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZZR-To16Mx91" + }, + "source": [ + "## **Bert For Sequence Classification**\n", + "\n", + "I will go over the Bert For Sequence Classification model. This is a Bert language model with a classification layer on top.\n", + "\n", + "If you plan on looking at other transformers models, this tutorial will be very similar." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "25pwDSiSDhXU" + }, + "source": [ + "## **Class Call**\n", + "\n", + "Let's start with doing a forward pass using the whole model call from Hugging Face Transformers." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 222, + "referenced_widgets": [ + "61c7d801e1fd414eb0f8513aca4fdf36", + "c6479a8bd482411cbb23e25cae993d34", + "612ca63a15fc4bcb974eb0cda7c8e12b", + "3dc98afd608c44479dd3b1f64d1c4d7a", + "8a57ad23e56844a4924c65adfae973bd", + "73ec4b91421c4aa8908260666a7d4373", + "f853af8ab5f645d49af43a4b138d6db2", + "9666227b0e58497e972bb4d03624e0b4" + ] + }, + "id": "Hm4y6XY0CEU2", + "outputId": "e9df20bf-da2f-4d22-d70b-d055c47adbb9" + }, + "source": [ + "# Let's start with the final model the way we normally use it.\n", + "model = BertForSequenceClassification.from_pretrained('bert-base-cased')\n", + "\n", + "# Perform a forward pass. 
We only care about the output and no gradients.\n", + "with torch.no_grad():\n", + " output = model.forward(**input_sequences)\n", + "\n", + "print()\n", + "\n", + "# Let's check how a forward pass output looks like.\n", + "print('FORWARD PASS OUTPUT:', output)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "61c7d801e1fd414eb0f8513aca4fdf36", + "version_minor": 0, + "version_major": 2 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, description='Downloading', max=435779157.0, style=ProgressStyle(descri…" + ] + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "\n", + "FORWARD PASS OUTPUT: SequenceClassifierOutput(loss=tensor(0.7454), logits=tensor([[ 0.2661, -0.1774],\n", + " [ 0.2223, -0.0847]]), hidden_states=None, attentions=None)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XINuHJDIDwVZ" + }, + "source": [ + "## **Class Components**\n", + "\n", + "Now let's look at the code implementation and break down each part of the model and check the outputs.\n", + "\n", + "Start with the `BertForSequenceClassification` found in [transformers/src/transformers/models/bert/modeling_bert.py#L1449](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L1449).\n", + "\n", + "The `forward` pass uses the following layers:\n", + "\n", + "* [BertModel](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L815) layer: \n", + "\n", + " `self.bert = BertModel(config)`\n", + "\n", + "* [torch.nn.Dropout](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html#dropout) layer for dropout:\n", + "\n", + " `self.dropout = nn.Dropout(config.hidden_dropout_prob)`\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) layer used for classification: \n", + "\n", + " `self.classifier = nn.Linear(config.hidden_size, 
config.num_labels)`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nQUrQ24VGBqC" + }, + "source": [ + "### **BertModel**\n", + "\n", + "This is the core Bert model that can be found at: [transformers/src/transformers/models/bert/modeling_bert.py#L815](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L815).\n", + "\n", + "Hugging Face was nice enough to mention a small summary: *The bare Bert Model transformer outputting raw hidden-states without any specific head on top.*\n", + "\n", + "The `forward` pass uses the following layers:\n", + "\n", + "* [BertEmbeddings](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L165) layer: \n", + "\n", + " `self.embeddings = BertEmbeddings(config)`\n", + "\n", + "* [BertEncoder](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L512) layer:\n", + "\n", + " `self.encoder = BertEncoder(config)`\n", + "\n", + "* [BertPooler](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L601) layer:\n", + "\n", + " `self.pooler = BertPooler(config)`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_SsmnCG-qE3I" + }, + "source": [ + "#### **Bert Embeddings**\n", + "\n", + "This is where we feed the `input_sequences` created under **Bert Tokenizer** and get our first embeddings.\n", + "\n", + "Implementation can be found at: [transformers/src/transformers/models/bert/modeling_bert.py#L165](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L165).\n", + "\n", + "This layer contains actual PyTorch layers. I won't go into further details since this is how far we need to go.\n", + "\n", + "The `forward` pass uses the following layers:\n", + "\n", + "* [torch.nn.Embedding](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#embedding) layer for word embeddings:\n", + "\n", + " `self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)`\n", + "\n", + "* [torch.nn.Embedding](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#embedding) layer for position embeddings:\n", + "\n", + " `self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)`\n", + "\n", + "* [torch.nn.Embedding](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#embedding) for token type embeddings:\n", + "\n", + " `self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)`\n", + "\n", + "\n", + "* [torch.nn.LayerNorm](https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html#layernorm) layer for normalization:\n", + "\n", + " `self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)`\n", + "\n", + "* [torch.nn.Dropout](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html#dropout) layer for dropout:\n", + "\n", + " `self.dropout = nn.Dropout(config.hidden_dropout_prob)`\n", + "\n", + "\n", + "
\n", + "Show Bert Embeddings Diagram\n", + "\n", + "![bert_inner_workings_embeddings](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_embeddings.png)\n", + "\n", + "
\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "i3nAL6CCvZ7g", + "outputId": "1aa4c55f-dfcf-49ef-b30c-853fb3564016" + }, + "source": [ + "class BertEmbeddings(torch.nn.Module):\n", + " \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n", + "\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.word_embeddings = torch.nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n", + " self.position_embeddings = torch.nn.Embedding(config.max_position_embeddings, config.hidden_size)\n", + " self.token_type_embeddings = torch.nn.Embedding(config.type_vocab_size, config.hidden_size)\n", + "\n", + " # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n", + " # any TensorFlow checkpoint file\n", + " self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n", + " self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n", + "\n", + " # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n", + " self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n", + " self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n", + "\n", + " def forward(\n", + " self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n", + " ):\n", + " if input_ids is not None:\n", + " input_shape = input_ids.size()\n", + " else:\n", + " input_shape = inputs_embeds.size()[:-1]\n", + "\n", + " seq_length = input_shape[1]\n", + "\n", + " if position_ids is None:\n", + " position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n", + "\n", + " # ADDED\n", + " print('Created Tokens Positions IDs:\\n', position_ids)\n", + " \n", + "\n", + " if token_type_ids is None:\n", + " token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n", + "\n", + " if inputs_embeds is None:\n", + " inputs_embeds = self.word_embeddings(input_ids)\n", + " token_type_embeddings = self.token_type_embeddings(token_type_ids)\n", + "\n", + " # ADDED\n", + " print('\\nTokens IDs:\\n', input_ids.shape)\n", + " print('\\nTokens Type IDs:\\n', token_type_ids.shape)\n", + " print('\\nWord Embeddings:\\n', inputs_embeds.shape)\n", + "\n", + " embeddings = inputs_embeds + token_type_embeddings\n", + " if self.position_embedding_type == \"absolute\":\n", + " position_embeddings = self.position_embeddings(position_ids)\n", + "\n", + " # ADDED\n", + " print('\\nPosition Embeddings:\\n', position_embeddings.shape)\n", + "\n", + " embeddings += position_embeddings\n", + "\n", + " # ADDED\n", + " print('\\nToken Types Embeddings:\\n', token_type_embeddings.shape)\n", + " print('\\nSum Up All Embeddings:\\n', embeddings.shape)\n", + "\n", + " embeddings = self.LayerNorm(embeddings)\n", + "\n", + " # ADDED\n", + " print('\\nEmbeddings Layer Nromalization:\\n', embeddings.shape)\n", + "\n", + " embeddings = self.dropout(embeddings)\n", + "\n", + " # ADDED\n", + " print('\\nEmbeddings Dropout Layer:\\n', embeddings.shape)\n", + " \n", + " return embeddings\n", + "\n", + "\n", + "# Create Bert embedding layer.\n", + "bert_embeddings_block = BertEmbeddings(bert_configuraiton)\n", + "\n", + "# Perform a forward pass.\n", + "embedding_output = 
bert_embeddings_block.forward(input_ids=input_sequences['input_ids'], token_type_ids=input_sequences['token_type_ids'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Created Tokens Positions IDs:\n", + " tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8]])\n", + "\n", + "Tokens IDs:\n", + " torch.Size([2, 9])\n", + "\n", + "Tokens Type IDs:\n", + " torch.Size([2, 9])\n", + "\n", + "Word Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Position Embeddings:\n", + " torch.Size([1, 9, 768])\n", + "\n", + "Token Types Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Sum Up All Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Embeddings Layer Nromalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Embeddings Dropout Layer:\n", + " torch.Size([2, 9, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MVyeQ1r-tKMF" + }, + "source": [ + "#### **Bert Encoder**\n", + "\n", + "This layer contains the core of the bert model where the self-attention happens. \n", + "\n", + "The implementation can be found at: [transformers/src/transformers/models/bert/modeling_bert.py#L512](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L512).\n", + "\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* 12 of the [BertLayer](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L429) layers ( in this setup `config.num_hidden_layers=12`):\n", + "\n", + " `self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2j7cwDvwuUFT" + }, + "source": [ + "##### **Bert Layer**\n", + "\n", + "This layer contains basic components of the self-attention implementation.\n", + "\n", + "Implementation can be found at [transformers/src/transformers/models/bert/modeling_bert.py#L429](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L429).\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* [BertAttention](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L351) layer:\n", + "\n", + " `self.attention = BertAttention(config)`\n", + "\n", + "* [BertIntermediate](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L400) layer:\n", + "\n", + " `self.intermediate = BertIntermediate(config)`\n", + "\n", + "* [BertOutput](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L415) layer:\n", + "\n", + " `self.output = BertOutput(config)`\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A8inSs_tuXH0" + }, + "source": [ + "###### **Bert Attention**\n", + "\n", + "This layer contains basic components of the self-attention implementation.\n", + "\n", + "Implementation can be found at [transformers/src/transformers/models/bert/modeling_bert.py#L351](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L351).\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* 
[BertSelfAttention](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L212) layer:\n", + "\n", + " `self.self = BertSelfAttention(config)`\n", + "\n", + "* [BertSelfOutput](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L337) layer:\n", + "\n", + " `self.output = BertSelfOutput(config)`\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "veMN9SrpueGj" + }, + "source": [ + "####### **BertSelfAttention**\n", + "\n", + "This layer contains the `torch.nn` basic components of the self-attention implementation.\n", + "\n", + "Implementation can be found at [transformers/src/transformers/models/bert/modeling_bert.py#L212](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L212).\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) used for the Query layer: \n", + "\n", + " `self.query = nn.Linear(config.hidden_size, self.all_head_size)`\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) used for the Key layer: \n", + "\n", + " `self.key = nn.Linear(config.hidden_size, self.all_head_size)`\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) used for the Value layer: \n", + "\n", + " `self.value = nn.Linear(config.hidden_size, self.all_head_size)`\n", + "\n", + "* [torch.nn.Dropout](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html#dropout) layer for dropout:\n", + "\n", + " `self.dropout = nn.Dropout(config.attention_probs_dropout_prob)`\n", + "\n", + "
\n", + "Show BertSelfAttention Diagram\n", + "\n", + "![bert_inner_workings_selfattention](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_selfattention.png)\n", + "\n", + "
\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "GybQyEPzTEeQ", + "outputId": "c63214b2-5ee3-438e-b7a3-f35036a9f707" + }, + "source": [ + "class BertSelfAttention(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n", + " raise ValueError(\n", + " \"The hidden size (%d) is not a multiple of the number of attention \"\n", + " \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n", + " )\n", + "\n", + " self.num_attention_heads = config.num_attention_heads\n", + " self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n", + " self.all_head_size = self.num_attention_heads * self.attention_head_size\n", + "\n", + " # ADDED\n", + " print('Attention Head Size:\\n', self.attention_head_size)\n", + " print('\\nCombined Attentions Head Size:\\n', self.all_head_size)\n", + "\n", + " self.query = torch.nn.Linear(config.hidden_size, self.all_head_size)\n", + " self.key = torch.nn.Linear(config.hidden_size, self.all_head_size)\n", + " self.value = torch.nn.Linear(config.hidden_size, self.all_head_size)\n", + "\n", + " self.dropout = torch.nn.Dropout(config.attention_probs_dropout_prob)\n", + " self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n", + " if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n", + " self.max_position_embeddings = config.max_position_embeddings\n", + " self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n", + "\n", + " self.is_decoder = config.is_decoder\n", + "\n", + " def transpose_for_scores(self, x):\n", + " new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n", + " x = x.view(*new_x_shape)\n", + " return x.permute(0, 2, 1, 3)\n", + "\n", + " def forward(\n", + " self,\n", + " hidden_states,\n", + " attention_mask=None,\n", + " head_mask=None,\n", + " encoder_hidden_states=None,\n", + " encoder_attention_mask=None,\n", + " past_key_value=None,\n", + " output_attentions=False,\n", + " ):\n", + " # ADDED\n", + " print('\\nHidden States:\\n', hidden_states.shape)\n", + "\n", + " mixed_query_layer = self.query(hidden_states)\n", + "\n", + " # If this is instantiated as a cross-attention module, the keys\n", + " # and values come from an encoder; the attention mask needs to be\n", + " # such that the encoder's padding tokens are not attended to.\n", + " is_cross_attention = encoder_hidden_states is not None\n", + "\n", + " if is_cross_attention and past_key_value is not None:\n", + "\n", + " # ADDED\n", + " print('\\nQuery Linear Layer:\\n', mixed_query_layer.shape)\n", + " print('\\nKey Linear Layer:\\n', past_key_value[0].shape)\n", + " print('\\nValue Linear Layer:\\n', past_key_value[1].shape)\n", + "\n", + " # reuse k,v, cross_attentions\n", + " key_layer = past_key_value[0]\n", + " value_layer = past_key_value[1]\n", + " attention_mask = encoder_attention_mask\n", + " elif is_cross_attention:\n", + "\n", + " # ADDED\n", + " print('\\nQuery Linear Layer:\\n', mixed_query_layer.shape)\n", + " print('\\nKey Linear Layer:\\n', self.key(encoder_hidden_states).shape)\n", + " print('\\nValue Linear Layer:\\n', self.value(encoder_hidden_states).shape)\n", + "\n", + " key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n", + " 
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n", + " attention_mask = encoder_attention_mask\n", + " elif past_key_value is not None:\n", + "\n", + " # ADDED\n", + " print('\\nQuery Linear Layer:\\n', mixed_query_layer.shape)\n", + " print('\\nKey Linear Layer:\\n', self.key(hidden_states).shape)\n", + " print('\\nValue Linear Layer:\\n', self.value(hidden_states).shape)\n", + "\n", + " key_layer = self.transpose_for_scores(self.key(hidden_states))\n", + " value_layer = self.transpose_for_scores(self.value(hidden_states))\n", + " key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n", + " value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n", + " else:\n", + "\n", + " # ADDED\n", + " print('\\nQuery Linear Layer:\\n', mixed_query_layer.shape)\n", + " print('\\nKey Linear Layer:\\n', self.key(hidden_states).shape)\n", + " print('\\nValue Linear Layer:\\n', self.value(hidden_states).shape)\n", + "\n", + " key_layer = self.transpose_for_scores(self.key(hidden_states))\n", + " value_layer = self.transpose_for_scores(self.value(hidden_states))\n", + "\n", + " \n", + " \n", + "\n", + " query_layer = self.transpose_for_scores(mixed_query_layer)\n", + "\n", + " # ADDED\n", + " print('\\nQuery:\\n', query_layer.shape)\n", + " print('\\nKey:\\n', key_layer.shape)\n", + " print('\\nValue:\\n', value_layer.shape)\n", + "\n", + " if self.is_decoder:\n", + " # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n", + " # Further calls to cross_attention layer can then reuse all cross-attention\n", + " # key/value_states (first \"if\" case)\n", + " # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n", + " # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n", + " # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n", + " # if encoder bi-directional self-attention `past_key_value` is always `None`\n", + " past_key_value = (key_layer, value_layer)\n", + "\n", + " # ADDED\n", + " print('\\nKey Transposed:\\n', key_layer.transpose(-1, -2).shape)\n", + "\n", + " # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n", + " attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n", + "\n", + " # ADDED\n", + " print('\\nAttention Scores:\\n', attention_scores.shape)\n", + "\n", + " if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n", + " seq_length = hidden_states.size()[1]\n", + " position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n", + " position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n", + " distance = position_ids_l - position_ids_r\n", + " positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n", + " positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n", + "\n", + " if self.position_embedding_type == \"relative_key\":\n", + " relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n", + " attention_scores = attention_scores + relative_position_scores\n", + " elif self.position_embedding_type == \"relative_key_query\":\n", + " relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n", + " relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n", + " attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n", + "\n", + " attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n", + "\n", + " # ADDED\n", + " print('\\nAttention Scores Divided by Scalar:\\n', attention_scores.shape)\n", + "\n", + " if attention_mask is not None:\n", + " # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n", + " attention_scores = attention_scores + attention_mask\n", + "\n", + " # Normalize the attention scores to probabilities.\n", + " attention_probs = torch.nn.Softmax(dim=-1)(attention_scores)\n", + "\n", + " # ADDED\n", + " print('\\nAttention Probabilities Softmax Layer:\\n', attention_probs.shape)\n", + "\n", + " # This is actually dropping out entire tokens to attend to, which might\n", + " # seem a bit unusual, but is taken from the original Transformer paper.\n", + " attention_probs = self.dropout(attention_probs)\n", + "\n", + " # ADDED\n", + " print('\\nAttention Probabilities Dropout Layer:\\n', attention_probs.shape)\n", + "\n", + " # Mask heads if we want to\n", + " if head_mask is not None:\n", + " attention_probs = attention_probs * head_mask\n", + "\n", + " context_layer = torch.matmul(attention_probs, value_layer)\n", + "\n", + " # ADDED\n", + " print('\\nContext:\\n', context_layer.shape)\n", + "\n", + " context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n", + "\n", + " # ADDED\n", + " print('\\nContext Permute:\\n', context_layer.shape)\n", + "\n", + " new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n", + " context_layer = context_layer.view(*new_context_layer_shape)\n", + "\n", + " # 
ADDED\n", + " print('\\nContext Reshaped:\\n', context_layer.shape)\n", + " \n", + " outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n", + "\n", + " if self.is_decoder:\n", + " outputs = outputs + (past_key_value,)\n", + " return outputs\n", + "\n", + "# Create bert self attention layer.\n", + "bert_selfattention_block = BertSelfAttention(bert_configuraiton)\n", + "\n", + "# Perform a forward pass.\n", + "context_embedding = bert_selfattention_block.forward(hidden_states=embedding_output)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YGGg3Z3b915Z" + }, + "source": [ + "####### **BertSelfOutput**\n", + "\n", + "This layer contains the `torch.nn` basic components of the self-attention implementation.\n", + "\n", + "Implementation can be found at [transformers/src/transformers/models/bert/modeling_bert.py#L337](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L337).\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) layer: \n", + "\n", + " `self.dense = nn.Linear(config.hidden_size, config.hidden_size)`\n", + "\n", + "* [torch.nn.LayerNorm](https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html#layernorm) layer for normalization:\n", + "\n", + " `self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)`\n", + "\n", + "* [torch.nn.Dropout](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html#dropout) layer for dropout:\n", + "\n", + " `self.dropout = nn.Dropout(config.hidden_dropout_prob)`\n", + "\n", + "
\n", + "Show BertSelfOutput Diagram\n", + "\n", + "![bert_inner_workings_selfoutput](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_selfoutput.png)\n", + "\n", + "
\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "YL8XQFuD94Jt", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "f5f3aee0-1a16-47c7-e6a2-fb9cde1303a8" + }, + "source": [ + "class BertSelfOutput(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)\n", + " self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n", + " self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n", + "\n", + " def forward(self, hidden_states, input_tensor):\n", + " print('Hidden States:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.dense(hidden_states)\n", + " print('\\nHidden States Linear Layer:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.dropout(hidden_states)\n", + " print('\\nHidden States Dropout Layer:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.LayerNorm(hidden_states + input_tensor)\n", + " print('\\nHidden States Normalization Layer:\\n', hidden_states.shape)\n", + "\n", + " return hidden_states\n", + "\n", + "\n", + "# Create Bert self output layer.\n", + "bert_selfoutput_block = BertSelfOutput(bert_configuraiton)\n", + "\n", + "# Perform a forward pass - context_embedding[0] because we have tuple.\n", + "attention_output = bert_selfoutput_block.forward(hidden_states=context_embedding[0], input_tensor=embedding_output)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8bowxY6TbKuI" + }, + "source": [ + "####### **Assemble BertAttention**\n", + "\n", + "\n", + "\n", + "Put together **[BertSelfAttention](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L212)** layer and **[BertSelfOutput](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L337)** layer to create the **[BertAttention](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L351) layer**.\n", + "\n", + "Now perform a `forward` pass using previous output layer as input.\n", + "\n", + "
\n", + "Show BertAttention Diagram\n", + "\n", + "![bert_inner_workings_attention](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_attention.png)\n", + "\n", + "
\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "At4GwiLtbNNn", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "73fd9f67-40a8-417e-8403-ef083a1a16af" + }, + "source": [ + "class BertAttention(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.self = BertSelfAttention(config)\n", + " self.output = BertSelfOutput(config)\n", + " self.pruned_heads = set()\n", + "\n", + " def prune_heads(self, heads):\n", + " if len(heads) == 0:\n", + " return\n", + " heads, index = find_pruneable_heads_and_indices(\n", + " heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n", + " )\n", + "\n", + " # Prune linear layers\n", + " self.self.query = prune_linear_layer(self.self.query, index)\n", + " self.self.key = prune_linear_layer(self.self.key, index)\n", + " self.self.value = prune_linear_layer(self.self.value, index)\n", + " self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n", + "\n", + " # Update hyper params and store pruned heads\n", + " self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n", + " self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n", + " self.pruned_heads = self.pruned_heads.union(heads)\n", + "\n", + " def forward(\n", + " self,\n", + " hidden_states,\n", + " attention_mask=None,\n", + " head_mask=None,\n", + " encoder_hidden_states=None,\n", + " encoder_attention_mask=None,\n", + " past_key_value=None,\n", + " output_attentions=False,\n", + " ):\n", + " self_outputs = self.self(\n", + " hidden_states,\n", + " attention_mask,\n", + " head_mask,\n", + " encoder_hidden_states,\n", + " encoder_attention_mask,\n", + " past_key_value,\n", + " output_attentions,\n", + " )\n", + " attention_output = self.output(self_outputs[0], hidden_states)\n", + " outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n", + " return outputs\n", + "\n", + "# Create attention assembled layer.\n", + "bert_attention_block = BertAttention(bert_configuraiton)\n", + "\n", + "# Perform a forward pass to wholte Bert Attention layer.\n", + "attention_output = bert_attention_block(hidden_states=embedding_output)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear 
Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ht-JRVCSGlOv" + }, + "source": [ + "###### **BertIntermediate**\n", + "\n", + "This layer contains the `torch.nn` basic components of the Bert model implementation.\n", + "\n", + "Implementation can be found at [transformers/src/transformers/models/bert/modeling_bert.py#L400](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L400).\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) layer: \n", + "\n", + " `self.dense = nn.Linear(config.hidden_size, config.intermediate_size)`\n", + "\n", + "
\n", + "Show BertIntermediate Diagram\n", + "\n", + "![bert_inner_workings_intermediate](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_intermediate.png)\n", + "\n", + "
\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "aGOVwx-6GnI7", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "a95ba4e5-5b33-4f5f-f645-9c72ac2213df" + }, + "source": [ + "class BertIntermediate(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.dense = torch.nn.Linear(config.hidden_size, config.intermediate_size)\n", + " if isinstance(config.hidden_act, str):\n", + " self.intermediate_act_fn = ACT2FN[config.hidden_act]\n", + " else:\n", + " self.intermediate_act_fn = config.hidden_act\n", + "\n", + " def forward(self, hidden_states):\n", + " print('\\nHidden States:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.dense(hidden_states)\n", + " print('\\nHidden States Linear Layer:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.intermediate_act_fn(hidden_states)\n", + " print('\\nHidden States Gelu Activation Function:\\n', hidden_states.shape)\n", + "\n", + " return hidden_states\n", + "\n", + "\n", + "# Create bert intermediate layer.\n", + "bert_intermediate_block = BertIntermediate(bert_configuraiton)\n", + "\n", + "# Perform a forward pass - attention_output[0] because we have tuple.\n", + "intermediate_output = bert_intermediate_block.forward(hidden_states=attention_output[0])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aYbhT3MUIU4U" + }, + "source": [ + "###### **BertOutput**\n", + "\n", + "This layer contains the `torch.nn` basic components of the Bert model implementation.\n", + "\n", + "Implementation can be found at [transformers/src/transformers/models/bert/modeling_bert.py#L415](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L415).\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) layer: \n", + "\n", + " `self.dense = nn.Linear(config.intermediate_size, config.hidden_size)`\n", + "\n", + "* [torch.nn.LayerNorm](https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html#layernorm) layer for normalization:\n", + "\n", + " `self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)`\n", + "\n", + "* [torch.nn.Dropout](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html#dropout) layer for dropout:\n", + "\n", + " `self.dropout = nn.Dropout(config.hidden_dropout_prob)`\n", + "\n", + "
\n", + "Show BertOutput Diagram\n", + "\n", + "![bert_inner_workings_output](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_output.png)\n", + "\n", + "
\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "FhEF8Pn4IW1M", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "dd000e0a-c51b-4dce-beda-d0110ff8b599" + }, + "source": [ + "class BertOutput(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.dense = torch.nn.Linear(config.intermediate_size, config.hidden_size)\n", + " self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n", + " self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n", + "\n", + " def forward(self, hidden_states, input_tensor):\n", + " print('\\nHidden States:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.dense(hidden_states)\n", + " print('\\nHidden States Linear Layer:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.dropout(hidden_states)\n", + " print('\\nHidden States Dropout Layer:\\n', hidden_states.shape)\n", + "\n", + " hidden_states = self.LayerNorm(hidden_states + input_tensor)\n", + " print('\\nHidden States Layer Normalization:\\n', hidden_states.shape)\n", + "\n", + " return hidden_states\n", + "\n", + "\n", + "# Create bert output layer.\n", + "bert_output_block = BertOutput(bert_configuraiton)\n", + "\n", + "# Perform forward pass - attention_output[0] dealing with tuple.\n", + "layer_output = bert_output_block.forward(hidden_states=intermediate_output, input_tensor=attention_output[0])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Wft5gZsxO7cv" + }, + "source": [ + "###### **Assemble BertLayer**\n", + "\n", + "\n", + "Put together **[BertAttention](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L351)** layer, **[BertIntermediate](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L400)** layer and **[BertOutput](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L415)** layer to create the **[BertLayer](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L429) layer**.\n", + "\n", + "\n", + "Now perform a `forward` pass using previous output layer as input.\n", + "\n", + "
\n", + "Show BertLayer Diagram\n", + "\n", + "![bert_inner_workings_layer](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_layer.png)\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gkEzZpjypGp-", + "outputId": "109467cc-44cf-452a-ea7e-1737c0358cf6" + }, + "source": [ + "class BertLayer(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.chunk_size_feed_forward = config.chunk_size_feed_forward\n", + " self.seq_len_dim = 1\n", + " self.attention = BertAttention(config)\n", + " self.is_decoder = config.is_decoder\n", + " self.add_cross_attention = config.add_cross_attention\n", + " if self.add_cross_attention:\n", + " assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n", + " self.crossattention = BertAttention(config)\n", + " self.intermediate = BertIntermediate(config)\n", + " self.output = BertOutput(config)\n", + "\n", + " def forward(\n", + " self,\n", + " hidden_states,\n", + " attention_mask=None,\n", + " head_mask=None,\n", + " encoder_hidden_states=None,\n", + " encoder_attention_mask=None,\n", + " past_key_value=None,\n", + " output_attentions=False,\n", + " ):\n", + " # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n", + " self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n", + " self_attention_outputs = self.attention(\n", + " hidden_states,\n", + " attention_mask,\n", + " head_mask,\n", + " output_attentions=output_attentions,\n", + " past_key_value=self_attn_past_key_value,\n", + " )\n", + " attention_output = self_attention_outputs[0]\n", + "\n", + " # if decoder, the last output is tuple of self-attn cache\n", + " if self.is_decoder:\n", + " outputs = self_attention_outputs[1:-1]\n", + " present_key_value = self_attention_outputs[-1]\n", + " else:\n", + " outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n", + "\n", + " cross_attn_present_key_value = None\n", + " if self.is_decoder and encoder_hidden_states is not None:\n", + " assert hasattr(\n", + " self, \"crossattention\"\n", + " ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n", + "\n", + " # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n", + " cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n", + " cross_attention_outputs = self.crossattention(\n", + " attention_output,\n", + " attention_mask,\n", + " head_mask,\n", + " encoder_hidden_states,\n", + " encoder_attention_mask,\n", + " cross_attn_past_key_value,\n", + " output_attentions,\n", + " )\n", + " attention_output = cross_attention_outputs[0]\n", + " outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n", + "\n", + " # add cross-attn cache to positions 3,4 of present_key_value tuple\n", + " cross_attn_present_key_value = cross_attention_outputs[-1]\n", + " present_key_value = present_key_value + cross_attn_present_key_value\n", + "\n", + " layer_output = apply_chunking_to_forward(\n", + " self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n", + " )\n", + " outputs = (layer_output,) + outputs\n", + "\n", + " # if decoder, return the attn key/values as the last output\n", + " if self.is_decoder:\n", + " outputs = outputs + (present_key_value,)\n", + "\n", + " return outputs\n", + "\n", + " def feed_forward_chunk(self, attention_output):\n", + " intermediate_output = 
self.intermediate(attention_output)\n", + " layer_output = self.output(intermediate_output, attention_output)\n", + " return layer_output\n", + "\n", + "\n", + "\n", + "# Assemble block to create Bert Layer.\n", + "bert_layer_block = BertLayer(bert_configuraiton)\n", + "\n", + "# Perform feed forward on a whole Bert Layer.\n", + "layer_output = bert_layer_block.forward(hidden_states=embedding_output)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MudQ911uOSFY" + }, + "source": [ + "##### **Assemble BertEncoder**\n", + "\n", + "Put together 12 of the **[BertLayer](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L429)** layers ( in this setup `config.num_hidden_layers=12`) to create the **[BertEncoder](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L512)** layer.\n", + "\n", + "\n", + "Now perform a `forward` pass using previous output layer as input.\n", + "\n", + "\n", + "
\n", + "Show BertEncoder Diagram\n", + "\n", + "![bert_inner_workings_encoder](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_encoder.png)\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "aIreUXiCoe71", + "outputId": "93e11b5a-e579-4845-eb15-48e37f528122" + }, + "source": [ + "class BertEncoder(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.config = config\n", + " self.layer = torch.nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n", + "\n", + " def forward(\n", + " self,\n", + " hidden_states,\n", + " attention_mask=None,\n", + " head_mask=None,\n", + " encoder_hidden_states=None,\n", + " encoder_attention_mask=None,\n", + " past_key_values=None,\n", + " use_cache=None,\n", + " output_attentions=False,\n", + " output_hidden_states=False,\n", + " return_dict=True,\n", + " ):\n", + " all_hidden_states = () if output_hidden_states else None\n", + " all_self_attentions = () if output_attentions else None\n", + " all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n", + "\n", + " next_decoder_cache = () if use_cache else None\n", + " for i, layer_module in enumerate(self.layer):\n", + "\n", + " # ADDED\n", + " print('\\n----------------- BERT LAYER %d -----------------'%(i+1))\n", + "\n", + " if output_hidden_states:\n", + " all_hidden_states = all_hidden_states + (hidden_states,)\n", + "\n", + " layer_head_mask = head_mask[i] if head_mask is not None else None\n", + " past_key_value = past_key_values[i] if past_key_values is not None else None\n", + " if getattr(self.config, \"gradient_checkpointing\", False):\n", + "\n", + " def create_custom_forward(module):\n", + " def custom_forward(*inputs):\n", + " return module(*inputs, past_key_value, output_attentions)\n", + "\n", + " return custom_forward\n", + "\n", + " layer_outputs = torch.utils.checkpoint.checkpoint(\n", + " create_custom_forward(layer_module),\n", + " hidden_states,\n", + " attention_mask,\n", + " layer_head_mask,\n", + " encoder_hidden_states,\n", + " encoder_attention_mask,\n", + " )\n", + " else:\n", + " layer_outputs = layer_module(\n", + " hidden_states,\n", + " attention_mask,\n", + " layer_head_mask,\n", + " encoder_hidden_states,\n", + " encoder_attention_mask,\n", + " past_key_value,\n", + " output_attentions,\n", + " )\n", + "\n", + " hidden_states = layer_outputs[0]\n", + " if use_cache:\n", + " next_decoder_cache += (layer_outputs[-1],)\n", + " if output_attentions:\n", + " all_self_attentions = all_self_attentions + (layer_outputs[1],)\n", + " if self.config.add_cross_attention:\n", + " all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n", + "\n", + " if output_hidden_states:\n", + " all_hidden_states = all_hidden_states + (hidden_states,)\n", + "\n", + " if not return_dict:\n", + " return tuple(\n", + " v\n", + " for v in [\n", + " hidden_states,\n", + " next_decoder_cache,\n", + " all_hidden_states,\n", + " all_self_attentions,\n", + " all_cross_attentions,\n", + " ]\n", + " if v is not None\n", + " )\n", + " return BaseModelOutputWithPastAndCrossAttentions(\n", + " last_hidden_state=hidden_states,\n", + " past_key_values=next_decoder_cache,\n", + " hidden_states=all_hidden_states,\n", + " attentions=all_self_attentions,\n", + " cross_attentions=all_cross_attentions,\n", + " )\n", + "\n", + "# create bert encoder block by stacking 12 layers\n", + "bert_encoder_block = BertEncoder(bert_configuraiton)\n", + "\n", + "# perform forward pass on entire Bert Encoder\n", + "encoder_embedding = bert_encoder_block.forward(hidden_states=embedding_output)" + ], 
+ "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "\n", + "----------------- BERT LAYER 1 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 2 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " 
torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 3 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 4 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 
768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 5 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer 
Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 6 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 7 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden 
States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 8 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 9 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden 
States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 10 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 11 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States 
Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 12 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "drEUMgtPLu7C" + }, + "source": [ + "#### **BertPooler**\n", + "\n", + "This layer contains the core of the bert model where the self-attention happens. 
\n", + "\n", + "The implementation can be found at: [transformers/src/transformers/models/bert/modeling_bert.py#L601](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L601).\n", + "\n", + "\n", + "The `forward` pass uses:\n", + "\n", + "* [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear) layer: \n", + "\n", + " `self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)`\n", + "\n", + "* [torch.nn.Tanh](https://pytorch.org/docs/stable/generated/torch.nn.Tanh.html#tanh) activation function layer: \n", + "\n", + " `self.activation = torch.nn.Tanh()`\n", + "\n", + "
\n", + "Show BertPooler Diagram\n", + "\n", + "![bert_inner_workings_pooler](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_pooler.png)\n", + "\n", + "
\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "i8Quz7UULw-g", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "5f435611-8f60-4f6d-eca4-4ef46463ca7a" + }, + "source": [ + "class BertPooler(torch.nn.Module):\n", + " def __init__(self, config):\n", + " super().__init__()\n", + " self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)\n", + " self.activation = torch.nn.Tanh()\n", + "\n", + " def forward(self, hidden_states):\n", + " # We \"pool\" the model by simply taking the hidden state corresponding\n", + " # to the first token.\n", + " \n", + " print('\\nHidden States:\\n', hidden_states.shape)\n", + "\n", + " first_token_tensor = hidden_states[:, 0]\n", + " print('\\nFirst Token [CLS]:\\n', first_token_tensor.shape)\n", + "\n", + " pooled_output = self.dense(first_token_tensor)\n", + " print('\\nFirst Token [CLS] Linear Layer:\\n', pooled_output.shape)\n", + "\n", + " pooled_output = self.activation(pooled_output)\n", + " print('\\nFirst Token [CLS] Tanh Activation Function:\\n', pooled_output.shape)\n", + "\n", + " return pooled_output\n", + "\n", + "\n", + "# Create bert pooler block.\n", + "bert_pooler_block = BertPooler(bert_configuraiton)\n", + "\n", + "# Perform forward pass - encoder_embedding[0] because it is a tuple.\n", + "pooled_output = bert_pooler_block(hidden_states=encoder_embedding[0])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "First Token [CLS]:\n", + " torch.Size([2, 768])\n", + "\n", + "First Token [CLS] Linear Layer:\n", + " torch.Size([2, 768])\n", + "\n", + "First Token [CLS] Tanh Activation Function:\n", + " torch.Size([2, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iE-mqEXsNEnt" + }, + "source": [ + "#### **Assemble BertModel**\n", + "\n", + "Put together **[BertEmbeddings](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L165)** layer, **[BertEncoder](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L512)** layer and **[BertPooler](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L601)** layer to create the **[BertModel](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L815)** layer.\n", + "\n", + "\n", + "Now perform a `forward` pass using previous output layer as input.\n", + "\n", + "
\n", + "Show BertPooler Diagram\n", + "\n", + "![bert_inner_workings_model](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings_model.png)\n", + "\n", + "
" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "lRSj__bRut7p", + "outputId": "7b7445d1-1599-497a-e40f-1f1eacd4c1df" + }, + "source": [ + "class BertModel(BertPreTrainedModel):\n", + " \"\"\"\n", + " The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n", + " cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n", + " all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n", + " Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n", + " To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n", + " set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n", + " argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n", + " input to the forward pass.\n", + " \"\"\"\n", + "\n", + " def __init__(self, config, add_pooling_layer=True):\n", + " super().__init__(config)\n", + " self.config = config\n", + "\n", + " self.embeddings = BertEmbeddings(config)\n", + " self.encoder = BertEncoder(config)\n", + "\n", + " self.pooler = BertPooler(config) if add_pooling_layer else None\n", + "\n", + " self.init_weights()\n", + "\n", + " def get_input_embeddings(self):\n", + " return self.embeddings.word_embeddings\n", + "\n", + " def set_input_embeddings(self, value):\n", + " self.embeddings.word_embeddings = value\n", + "\n", + " def _prune_heads(self, heads_to_prune):\n", + " \"\"\"\n", + " Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n", + " class PreTrainedModel\n", + " \"\"\"\n", + " for layer, heads in heads_to_prune.items():\n", + " self.encoder.layer[layer].attention.prune_heads(heads)\n", + "\n", + " def forward(\n", + " self,\n", + " input_ids=None,\n", + " attention_mask=None,\n", + " token_type_ids=None,\n", + " position_ids=None,\n", + " head_mask=None,\n", + " inputs_embeds=None,\n", + " encoder_hidden_states=None,\n", + " encoder_attention_mask=None,\n", + " past_key_values=None,\n", + " use_cache=None,\n", + " output_attentions=None,\n", + " output_hidden_states=None,\n", + " return_dict=None,\n", + " ):\n", + " r\"\"\"\n", + " encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n", + " Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n", + " the model is configured as a decoder.\n", + " encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n", + " Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n", + " the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n", + " - 1 for tokens that are **not masked**,\n", + " - 0 for tokens that are **masked**.\n", + " past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n", + " Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n", + " If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n", + " (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n", + " instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n", + " use_cache (:obj:`bool`, `optional`):\n", + " If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n", + " decoding (see :obj:`past_key_values`).\n", + " \"\"\"\n", + " output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n", + " output_hidden_states = (\n", + " output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n", + " )\n", + " return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n", + "\n", + " if self.config.is_decoder:\n", + " use_cache = use_cache if use_cache is not None else self.config.use_cache\n", + " else:\n", + " use_cache = False\n", + "\n", + " if input_ids is not None and inputs_embeds is not None:\n", + " raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n", + " elif input_ids is not None:\n", + " input_shape = input_ids.size()\n", + " batch_size, seq_length = input_shape\n", + " elif inputs_embeds is not None:\n", + " input_shape = inputs_embeds.size()[:-1]\n", + " batch_size, seq_length = input_shape\n", + " else:\n", + " raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n", + "\n", + " device = input_ids.device if input_ids is not None else inputs_embeds.device\n", + "\n", + " # past_key_values_length\n", + " past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n", + "\n", + " if attention_mask is None:\n", + " attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n", + " if token_type_ids is None:\n", + " token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n", + "\n", + " # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n", + " # ourselves in which case we just need to make it broadcastable to all heads.\n", + " extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n", + "\n", + " # If a 2D or 3D attention mask is provided for the cross-attention\n", + " # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n", + " if self.config.is_decoder and encoder_hidden_states is not None:\n", + " encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n", + " encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n", + " if encoder_attention_mask is None:\n", + " encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n", + " encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n", + " else:\n", + " encoder_extended_attention_mask = None\n", + "\n", + " # Prepare head mask if needed\n", + " # 1.0 in head_mask indicate we keep the head\n", + " # attention_probs has shape bsz x n_heads x N x N\n", + " # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n", + " # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n", + " head_mask = self.get_head_mask(head_mask, 
self.config.num_hidden_layers)\n", + "\n", + " embedding_output = self.embeddings(\n", + " input_ids=input_ids,\n", + " position_ids=position_ids,\n", + " token_type_ids=token_type_ids,\n", + " inputs_embeds=inputs_embeds,\n", + " past_key_values_length=past_key_values_length,\n", + " )\n", + " encoder_outputs = self.encoder(\n", + " embedding_output,\n", + " attention_mask=extended_attention_mask,\n", + " head_mask=head_mask,\n", + " encoder_hidden_states=encoder_hidden_states,\n", + " encoder_attention_mask=encoder_extended_attention_mask,\n", + " past_key_values=past_key_values,\n", + " use_cache=use_cache,\n", + " output_attentions=output_attentions,\n", + " output_hidden_states=output_hidden_states,\n", + " return_dict=return_dict,\n", + " )\n", + " sequence_output = encoder_outputs[0]\n", + " pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n", + "\n", + " if not return_dict:\n", + " return (sequence_output, pooled_output) + encoder_outputs[1:]\n", + "\n", + " return BaseModelOutputWithPoolingAndCrossAttentions(\n", + " last_hidden_state=sequence_output,\n", + " pooler_output=pooled_output,\n", + " past_key_values=encoder_outputs.past_key_values,\n", + " hidden_states=encoder_outputs.hidden_states,\n", + " attentions=encoder_outputs.attentions,\n", + " cross_attentions=encoder_outputs.cross_attentions,\n", + " )\n", + "\n", + "\n", + "# Create bert model.\n", + "bert_model = BertModel(bert_configuraiton)\n", + "\n", + "# Perform forward pass on entire model.\n", + "hidden_states = bert_model.forward(input_ids=input_sequences['input_ids'], attention_mask=input_sequences['attention_mask'], token_type_ids=input_sequences['token_type_ids'])\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Created Tokens Positions IDs:\n", + " tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8]])\n", + "\n", + "Tokens IDs:\n", + " torch.Size([2, 9])\n", + "\n", + "Tokens Type IDs:\n", + " torch.Size([2, 9])\n", + "\n", + "Word Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Position Embeddings:\n", + " torch.Size([1, 9, 768])\n", + "\n", + "Token Types Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Sum Up All Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Embeddings Layer Nromalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Embeddings Dropout Layer:\n", + " 
torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 1 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 2 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " 
torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 3 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 4 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " 
torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 5 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 6 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + 
" torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 7 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 8 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + 
"Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 9 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 10 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention 
Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 11 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 12 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key 
Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "First Token [CLS]:\n", + " torch.Size([2, 768])\n", + "\n", + "First Token [CLS] Linear Layer:\n", + " torch.Size([2, 768])\n", + "\n", + "First Token [CLS] Tanh Activation Function:\n", + " torch.Size([2, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lZ6_7ir3ej1C" + }, + "source": [ + "### Assemble Components\n", + "\n", + "Put together **[BertModel](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L815)** layer, **[torch.nn.Dropout](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html#dropout)** layer and **[torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#linear)** layer to create the **[BertForSequenceClassification](https://github.com/huggingface/transformers/blob/d944966b19a4d6860bddc7cdc1ba928ca8a0da91/src/transformers/models/bert/modeling_bert.py#L1449)** model.\n", + "\n", + "\n", + "Now perform a `forward` pass using previous output layer as input." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "NWtKYfRyykYa", + "outputId": "bffd373c-5b55-45f7-e94c-3741a2497608" + }, + "source": [ + "class BertForSequenceClassification(BertPreTrainedModel):\n", + " def __init__(self, config):\n", + " super().__init__(config)\n", + " self.num_labels = config.num_labels\n", + "\n", + " self.bert = BertModel(config)\n", + " self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n", + " self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)\n", + "\n", + " self.init_weights()\n", + "\n", + " def forward(\n", + " self,\n", + " input_ids=None,\n", + " attention_mask=None,\n", + " token_type_ids=None,\n", + " position_ids=None,\n", + " head_mask=None,\n", + " inputs_embeds=None,\n", + " labels=None,\n", + " output_attentions=None,\n", + " output_hidden_states=None,\n", + " return_dict=None,\n", + " ):\n", + " r\"\"\"\n", + " labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n", + " Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n", + " config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n", + " If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n", + " \"\"\"\n", + " return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n", + "\n", + " outputs = self.bert(\n", + " input_ids,\n", + " attention_mask=attention_mask,\n", + " token_type_ids=token_type_ids,\n", + " position_ids=position_ids,\n", + " head_mask=head_mask,\n", + " inputs_embeds=inputs_embeds,\n", + " output_attentions=output_attentions,\n", + " output_hidden_states=output_hidden_states,\n", + " return_dict=return_dict,\n", + " )\n", + "\n", + " pooled_output = outputs[1]\n", + "\n", + " pooled_output = self.dropout(pooled_output)\n", + " logits = self.classifier(pooled_output)\n", + "\n", + " loss = None\n", + " if labels is not None:\n", + " if self.num_labels == 1:\n", + " # We are doing regression\n", + " loss_fct = MSELoss()\n", + " loss = loss_fct(logits.view(-1), labels.view(-1))\n", + " else:\n", + " loss_fct = torch.nn.CrossEntropyLoss()\n", + " loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n", + "\n", + " if not return_dict:\n", + " output = (logits,) + outputs[2:]\n", + " return ((loss,) + output) if loss is not None else output\n", + "\n", + " return SequenceClassifierOutput(\n", + " loss=loss,\n", + " logits=logits,\n", + " hidden_states=outputs.hidden_states,\n", + " attentions=outputs.attentions,\n", + " )\n", + "\n", + "\n", + "# create Bert model with classification layer - BertForSequenceClassificatin\n", + "bert_for_sequence_classification_model = BertForSequenceClassification(bert_configuraiton)\n", + "\n", + "# perform forward pass on entire model\n", + "outputs = bert_for_sequence_classification_model(**input_sequences)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head 
Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Attention Head Size:\n", + " 64\n", + "\n", + "Combined Attentions Head Size:\n", + " 768\n", + "Created Tokens Positions IDs:\n", + " tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8]])\n", + "\n", + "Tokens IDs:\n", + " torch.Size([2, 9])\n", + "\n", + "Tokens Type IDs:\n", + " torch.Size([2, 9])\n", + "\n", + "Word Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Position Embeddings:\n", + " torch.Size([1, 9, 768])\n", + "\n", + "Token Types Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Sum Up All Embeddings:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Embeddings Layer Nromalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Embeddings Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 1 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 2 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + 
"Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 3 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 4 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 
9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 5 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " 
torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 6 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 7 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation 
Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 8 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 9 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization 
Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 10 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 11 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden 
States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "----------------- BERT LAYER 12 -----------------\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Key Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Value Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Query:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Value:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Key Transposed:\n", + " torch.Size([2, 12, 64, 9])\n", + "\n", + "Attention Scores:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Scores Divided by Scalar:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Softmax Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Attention Probabilities Dropout Layer:\n", + " torch.Size([2, 12, 9, 9])\n", + "\n", + "Context:\n", + " torch.Size([2, 12, 9, 64])\n", + "\n", + "Context Permute:\n", + " torch.Size([2, 9, 12, 64])\n", + "\n", + "Context Reshaped:\n", + " torch.Size([2, 9, 768])\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Normalization Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Gelu Activation Function:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 3072])\n", + "\n", + "Hidden States Linear Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Dropout Layer:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States Layer Normalization:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "Hidden States:\n", + " torch.Size([2, 9, 768])\n", + "\n", + "First Token [CLS]:\n", + " torch.Size([2, 768])\n", + "\n", + "First Token [CLS] Linear Layer:\n", + " torch.Size([2, 768])\n", + "\n", + "First Token [CLS] Tanh Activation Function:\n", + " torch.Size([2, 768])\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "B-3p57ECA1-S" + }, + "source": [ + "### Complete Diagram\n", + "\n", + "* If you want a **.pdf** version of this diagram: [bert_inner_workings.pdf](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings.pdf).\n", + "\n", + "* If you want a **.png** version of this diagram: [bert_inner_workings.png](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings.png).\n", + "\n", + "
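\n", + "The per-layer shape flow printed above can be reproduced in a few lines of plain PyTorch. The sketch below is only illustrative: it uses random tensors and freshly initialized `nn.Linear` layers with the shapes from the output (batch size 2, sequence length 9, hidden size 768, 12 attention heads of size 64), skips the dropout, residual, and layer-normalization steps, and is not the actual Hugging Face `BertSelfAttention` code.\n", + "\n", + "```python\n", + "import torch\n", + "import torch.nn as nn\n", + "\n", + "# illustrative shapes taken from the printed output above\n", + "batch, seq_len, hidden, heads, head_size = 2, 9, 768, 12, 64\n", + "hidden_states = torch.rand(batch, seq_len, hidden)                     # [2, 9, 768]\n", + "\n", + "# query/key/value projections keep the hidden size\n", + "q_lin, k_lin, v_lin = (nn.Linear(hidden, hidden) for _ in range(3))\n", + "query, key, value = q_lin(hidden_states), k_lin(hidden_states), v_lin(hidden_states)\n", + "\n", + "# split the hidden dimension into 12 heads of size 64\n", + "def split_heads(x):\n", + "    return x.view(batch, seq_len, heads, head_size).permute(0, 2, 1, 3)  # [2, 12, 9, 64]\n", + "\n", + "query, key, value = split_heads(query), split_heads(key), split_heads(value)\n", + "\n", + "scores = torch.matmul(query, key.transpose(-1, -2))                    # [2, 12, 9, 9]\n", + "scores = scores / (head_size ** 0.5)                                   # divide by sqrt(head size)\n", + "probs = nn.functional.softmax(scores, dim=-1)                          # [2, 12, 9, 9]\n", + "\n", + "context = torch.matmul(probs, value)                                   # [2, 12, 9, 64]\n", + "context = context.permute(0, 2, 1, 3).reshape(batch, seq_len, hidden)  # [2, 9, 768]\n", + "\n", + "# feed-forward block: 768 -> 3072 with GELU, then back to 768\n", + "intermediate = nn.functional.gelu(nn.Linear(hidden, 4 * hidden)(context))  # [2, 9, 3072]\n", + "layer_out = nn.Linear(4 * hidden, hidden)(intermediate)                    # [2, 9, 768]\n", + "\n", + "# pooler: linear + tanh on the first ([CLS]) token\n", + "pooled = torch.tanh(nn.Linear(hidden, hidden)(layer_out[:, 0]))        # [2, 768]\n", + "```\n", + "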
\n", + "\n", + "![bert_inner_workings](https://github.com/gmihaila/ml_things/raw/master/notebooks/pytorch/bert_inner_workings/bert_inner_workings.png)\n" + ] + } + ] +} \ No newline at end of file