{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.\n\n**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**\n\nThis notebook was generated for TensorFlow 2.6."
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"# Conclusions"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Key concepts in review"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Various approaches to AI"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### What makes deep learning special within the field of machine learning"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### How to think about deep learning"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Key enabling technologies"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### The universal machine-learning workflow"
]
},
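{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"In code terms, the workflow boils down to: collect and split the data, establish a baseline, fit while monitoring validation metrics, then evaluate once on the test set. The cell below is a minimal sketch of those steps on synthetic stand-in data; the shapes, split sizes, and hyperparameters are illustrative assumptions, not code from the book."
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import numpy as np\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"# Synthetic stand-in data for a binary classification task (assumed shapes).\n",
"num_samples, num_input_features = 1000, 16\n",
"inputs = np.random.rand(num_samples, num_input_features).astype(\"float32\")\n",
"targets = np.random.randint(0, 2, size=(num_samples, 1)).astype(\"float32\")\n",
"\n",
"# Hold out validation and test sets before touching the model.\n",
"train_x, val_x, test_x = inputs[:600], inputs[600:800], inputs[800:]\n",
"train_y, val_y, test_y = targets[:600], targets[600:800], targets[800:]\n",
"\n",
"# A small first-cut model to beat a trivial baseline.\n",
"model = keras.Sequential([\n",
"    layers.Dense(32, activation=\"relu\"),\n",
"    layers.Dense(1, activation=\"sigmoid\"),\n",
"])\n",
"model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\",\n",
"              metrics=[\"accuracy\"])\n",
"\n",
"# Fit while monitoring validation metrics to detect overfitting.\n",
"model.fit(train_x, train_y, epochs=5, validation_data=(val_x, val_y))\n",
"\n",
"# Evaluate once on the held-out test set.\n",
"test_loss, test_acc = model.evaluate(test_x, test_y)"
]
},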
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Key network architectures"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Densely-connected networks"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"inputs = keras.Input(shape=(num_input_features,))\n",
"x = layers.Dense(32, activation=\"relu\")(inputs)\n",
"x = layers.Dense(32, activation=\"relu\")(x)\n",
"outputs = layers.Dense(1, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(num_input_features,))\n",
"x = layers.Dense(32, activation=\"relu\")(inputs)\n",
"x = layers.Dense(32, activation=\"relu\")(x)\n",
"outputs = layers.Dense(num_classes, activation=\"softmax\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"categorical_crossentropy\")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(num_input_features,))\n",
"x = layers.Dense(32, activation=\"relu\")(inputs)\n",
"x = layers.Dense(32, activation=\"relu\")(x)\n",
"outputs = layers.Dense(num_classes, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(num_input_features,))\n",
"x = layers.Dense(32, activation=\"relu\")(inputs)\n",
"x = layers.Dense(32, activation=\"relu\")(x)\n",
"outputs = layers.Dense(num_values)(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"mse\")"
]
},
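{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"The four cells above are templates: `num_input_features`, `num_classes`, and `num_values` are placeholders that the book leaves undefined. To actually run them, assign concrete values first and then re-run the templates; the numbers below are arbitrary assumptions, chosen only for illustration."
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"# Arbitrary illustrative values (assumptions, not from the book).\n",
"num_input_features = 16  # dimensionality of each input vector\n",
"num_classes = 10         # number of categories to predict\n",
"num_values = 1           # number of regression targets"
]
},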
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Convnets"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(height, width, channels))\n",
"x = layers.SeparableConv2D(32, 3, activation=\"relu\")(inputs)\n",
"x = layers.SeparableConv2D(64, 3, activation=\"relu\")(x)\n",
"x = layers.MaxPooling2D(2)(x)\n",
"x = layers.SeparableConv2D(64, 3, activation=\"relu\")(x)\n",
"x = layers.SeparableConv2D(128, 3, activation=\"relu\")(x)\n",
"x = layers.MaxPooling2D(2)(x)\n",
"x = layers.SeparableConv2D(64, 3, activation=\"relu\")(x)\n",
"x = layers.SeparableConv2D(128, 3, activation=\"relu\")(x)\n",
"x = layers.GlobalAveragePooling2D()(x)\n",
"x = layers.Dense(32, activation=\"relu\")(x)\n",
"outputs = layers.Dense(num_classes, activation=\"softmax\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"categorical_crossentropy\")"
]
},
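{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"Likewise, `height`, `width`, and `channels` are placeholders. An assumed example, using small RGB images:"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"# Illustrative input dimensions (an assumption, not from the book).\n",
"height, width, channels = 64, 64, 3  # small RGB images"
]
},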
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### RNNs"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(num_timesteps, num_features))\n",
"x = layers.LSTM(32)(inputs)\n",
"outputs = layers.Dense(num_classes, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(num_timesteps, num_features))\n",
"x = layers.LSTM(32, return_sequences=True)(inputs)\n",
"x = layers.LSTM(32, return_sequences=True)(x)\n",
"x = layers.LSTM(32)(x)\n",
"outputs = layers.Dense(num_classes, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")"
]
},
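{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"A common variant covered in chapter 10 but not shown above is to wrap the recurrent layer in `layers.Bidirectional`, so the sequence is processed in both directions. A minimal sketch, reusing the same placeholders:"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"# Sketch: a bidirectional variant of the single-LSTM classifier above.\n",
"inputs = keras.Input(shape=(num_timesteps, num_features))\n",
"x = layers.Bidirectional(layers.LSTM(32))(inputs)\n",
"outputs = layers.Dense(num_classes, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")"
]
},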
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Transformers"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"encoder_inputs = keras.Input(shape=(sequence_length,), dtype=\"int64\")\n",
"x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs)\n",
"encoder_outputs = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)\n",
"decoder_inputs = keras.Input(shape=(None,), dtype=\"int64\")\n",
"x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs)\n",
"x = TransformerDecoder(embed_dim, dense_dim, num_heads)(x, encoder_outputs)\n",
"decoder_outputs = layers.Dense(vocab_size, activation=\"softmax\")(x)\n",
"transformer = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n",
"transformer.compile(optimizer=\"rmsprop\", loss=\"categorical_crossentropy\")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(sequence_length,), dtype=\"int64\")\n",
"x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)\n",
"x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)\n",
"x = layers.GlobalMaxPooling1D()(x)\n",
"outputs = layers.Dense(1, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs, outputs)\n",
"model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")"
]
},
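{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"Note that `PositionalEmbedding`, `TransformerEncoder`, and `TransformerDecoder` are not built-in Keras layers: they are the custom layers implemented in chapter 11 of the book, and must be defined (or imported) before the two cells above can run."
]
},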
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### The space of possibilities"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## The limitations of deep learning"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### The risk of anthropomorphizing machine-learning models"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Automatons vs. intelligent agents"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Local generalization vs. extreme generalization"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### The purpose of intelligence"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Climbing the spectrum of generalization"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Setting the course towards greater generality in AI"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### On the importance of setting the right objective: the shortcut rule"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### A new target"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Implementing intelligence: the missing ingredients"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Intelligence as sensitivity to abstract analogies"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### The two poles of abstraction"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Value-centric analogy"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Program-centric analogy"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Cognition as a combination of both kinds of abstraction"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### The missing half of the picture"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## The future of deep learning"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Models as programs"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Blending together deep learning and program synthesis"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Integrating deep learning modules and algorithmic modules into hybrid systems"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"#### Using deep learning to guide program search"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Lifelong learning and modular subroutine reuse"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### The long-term vision"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Staying up to date in a fast-moving field"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Practice on real-world problems using Kaggle"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Read about the latest developments on arXiv"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Explore the Keras ecosystem"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Final words"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"name": "chapter14_conclusions.i",
"private_outputs": false,
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}