mirror of https://github.com/fchollet/deep-learning-with-python-notebooks.git (synced 2021-07-27 01:28:40 +03:00)
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.\n\n**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**\n\nThis notebook was generated for TensorFlow 2.6."
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Modern convnet architecture patterns"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Modularity, hierarchy, and reuse"
]
},
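{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"**Illustrative sketch (not from the book's code): reusing a block to build a hierarchy.** The hypothetical `conv_block` helper below and its filter counts are arbitrary; it only shows the idea of assembling a model from a repeated, reusable module."
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"def conv_block(x, filters):\n",
"    # Reusable module: two same-padded convolutions followed by downsampling.\n",
"    x = layers.Conv2D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
"    x = layers.Conv2D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
"    return layers.MaxPooling2D(2)(x)\n",
"\n",
"inputs = keras.Input(shape=(32, 32, 3))\n",
"# Deeper blocks in the hierarchy see smaller feature maps with more filters.\n",
"x = conv_block(inputs, filters=32)\n",
"x = conv_block(x, filters=64)\n",
"x = layers.GlobalAveragePooling2D()(x)\n",
"outputs = layers.Dense(10, activation=\"softmax\")(x)\n",
"model = keras.Model(inputs=inputs, outputs=outputs)"
]
},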
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Residual connections"
]
},
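{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"**Illustrative sketch (not in the book's notebook): the simplest residual connection.** When the block keeps the same number of filters and uses `padding=\"same\"`, the input can be added back to the output directly, with no reshaping of the residual."
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"inputs = keras.Input(shape=(32, 32, 3))\n",
"x = layers.Conv2D(32, 3, activation=\"relu\", padding=\"same\")(inputs)\n",
"residual = x\n",
"# Same filter count and \"same\" padding: output shape matches the residual.\n",
"x = layers.Conv2D(32, 3, activation=\"relu\", padding=\"same\")(x)\n",
"x = layers.add([x, residual])"
]
},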
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"**Case where the target block changes the number of output filters**"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"inputs = keras.Input(shape=(32, 32, 3))\n",
"x = layers.Conv2D(32, 3, activation=\"relu\")(inputs)\n",
"residual = x\n",
"x = layers.Conv2D(64, 3, activation=\"relu\", padding=\"same\")(x)\n",
"residual = layers.Conv2D(64, 1)(residual)\n",
"x = layers.add([x, residual])"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"**Case where the target block includes a max pooling layer**"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(32, 32, 3))\n",
"x = layers.Conv2D(32, 3, activation=\"relu\")(inputs)\n",
"residual = x\n",
"x = layers.Conv2D(64, 3, activation=\"relu\", padding=\"same\")(x)\n",
"x = layers.MaxPooling2D(2, padding=\"same\")(x)\n",
"residual = layers.Conv2D(64, 1, strides=2)(residual)\n",
"x = layers.add([x, residual])"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(32, 32, 3))\n",
"x = layers.Rescaling(1./255)(inputs)\n",
"\n",
"def residual_block(x, filters, pooling=False):\n",
"    residual = x\n",
"    x = layers.Conv2D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
"    x = layers.Conv2D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
"    if pooling:\n",
"        x = layers.MaxPooling2D(2, padding=\"same\")(x)\n",
"        residual = layers.Conv2D(filters, 1, strides=2)(residual)\n",
"    elif filters != residual.shape[-1]:\n",
"        residual = layers.Conv2D(filters, 1)(residual)\n",
"    x = layers.add([x, residual])\n",
"    return x\n",
"\n",
"x = residual_block(x, filters=32, pooling=True)\n",
"x = residual_block(x, filters=64, pooling=True)\n",
"x = residual_block(x, filters=128, pooling=False)\n",
"\n",
"x = layers.GlobalAveragePooling2D()(x)\n",
"outputs = layers.Dense(1, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs=inputs, outputs=outputs)\n",
"model.summary()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Batch normalization"
]
},
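{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"**Illustrative sketch (no code accompanies this section in the book's notebook).** The pattern discussed in the book, and used in the mini Xception-like model below: a convolution without a bias term, followed by `BatchNormalization`, with the activation placed after normalization."
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"inputs = keras.Input(shape=(32, 32, 3))\n",
"# The bias is redundant: BatchNormalization re-centers the output anyway.\n",
"x = layers.Conv2D(32, 3, use_bias=False)(inputs)\n",
"x = layers.BatchNormalization()(x)\n",
"# The activation comes after normalization.\n",
"x = layers.Activation(\"relu\")(x)"
]
},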
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Depthwise separable convolutions"
]
},
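{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"**Illustrative sketch (no code accompanies this section in the book's notebook).** `SeparableConv2D` is a drop-in replacement for `Conv2D` that factors the convolution into a per-channel (depthwise) spatial convolution followed by a pointwise 1x1 convolution, using far fewer parameters; the filter counts below are arbitrary."
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"inputs = keras.Input(shape=(64, 64, 64))\n",
"regular = layers.Conv2D(64, 3, padding=\"same\", use_bias=False)\n",
"separable = layers.SeparableConv2D(64, 3, padding=\"same\", use_bias=False)\n",
"# Calling the layers builds their weights so count_params() works.\n",
"_ = regular(inputs)\n",
"_ = separable(inputs)\n",
"# Regular: 3 * 3 * 64 * 64 = 36,864 weights.\n",
"# Separable: 3 * 3 * 64 (depthwise) + 64 * 64 (pointwise) = 4,672 weights.\n",
"print(regular.count_params(), separable.count_params())"
]
},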
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Putting it together: a mini Xception-like model"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"from google.colab import files\n",
"files.upload()"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"!mkdir ~/.kaggle\n",
"!cp kaggle.json ~/.kaggle/\n",
"!chmod 600 ~/.kaggle/kaggle.json\n",
"!kaggle competitions download -c dogs-vs-cats\n",
"!unzip -qq train.zip"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import os, shutil, pathlib\n",
"from tensorflow.keras.utils import image_dataset_from_directory\n",
"\n",
"original_dir = pathlib.Path(\"train\")\n",
"new_base_dir = pathlib.Path(\"cats_vs_dogs_small\")\n",
"\n",
"def make_subset(subset_name, start_index, end_index):\n",
"    for category in (\"cat\", \"dog\"):\n",
"        dir = new_base_dir / subset_name / category\n",
"        os.makedirs(dir)\n",
"        fnames = [f\"{category}.{i}.jpg\" for i in range(start_index, end_index)]\n",
"        for fname in fnames:\n",
"            shutil.copyfile(src=original_dir / fname,\n",
"                            dst=dir / fname)\n",
"\n",
"make_subset(\"train\", start_index=0, end_index=1000)\n",
"make_subset(\"validation\", start_index=1000, end_index=1500)\n",
"make_subset(\"test\", start_index=1500, end_index=2500)\n",
"\n",
"train_dataset = image_dataset_from_directory(\n",
"    new_base_dir / \"train\",\n",
"    image_size=(180, 180),\n",
"    batch_size=32)\n",
"validation_dataset = image_dataset_from_directory(\n",
"    new_base_dir / \"validation\",\n",
"    image_size=(180, 180),\n",
"    batch_size=32)\n",
"test_dataset = image_dataset_from_directory(\n",
"    new_base_dir / \"test\",\n",
"    image_size=(180, 180),\n",
"    batch_size=32)"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"data_augmentation = keras.Sequential(\n",
"    [\n",
"        layers.RandomFlip(\"horizontal\"),\n",
"        layers.RandomRotation(0.1),\n",
"        layers.RandomZoom(0.2),\n",
"    ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(180, 180, 3))\n",
"x = data_augmentation(inputs)\n",
"\n",
"x = layers.Rescaling(1./255)(x)\n",
"x = layers.Conv2D(filters=32, kernel_size=5, use_bias=False)(x)\n",
"\n",
"for size in [32, 64, 128, 256, 512]:\n",
"    residual = x\n",
"\n",
"    x = layers.BatchNormalization()(x)\n",
"    x = layers.Activation(\"relu\")(x)\n",
"    x = layers.SeparableConv2D(size, 3, padding=\"same\", use_bias=False)(x)\n",
"\n",
"    x = layers.BatchNormalization()(x)\n",
"    x = layers.Activation(\"relu\")(x)\n",
"    x = layers.SeparableConv2D(size, 3, padding=\"same\", use_bias=False)(x)\n",
"\n",
"    x = layers.MaxPooling2D(3, strides=2, padding=\"same\")(x)\n",
"\n",
"    residual = layers.Conv2D(\n",
"        size, 1, strides=2, padding=\"same\", use_bias=False)(residual)\n",
"    x = layers.add([x, residual])\n",
"\n",
"x = layers.GlobalAveragePooling2D()(x)\n",
"x = layers.Dropout(0.5)(x)\n",
"outputs = layers.Dense(1, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs=inputs, outputs=outputs)"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"model.compile(loss=\"binary_crossentropy\",\n",
"              optimizer=\"rmsprop\",\n",
"              metrics=[\"accuracy\"])\n",
"history = model.fit(\n",
"    train_dataset,\n",
"    epochs=100,\n",
"    validation_data=validation_dataset)"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"name": "chapter09_part02_modern-convnet-architecture-patterns.i",
"private_outputs": false,
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}