diff --git a/chapter02_mathematical-building-blocks.ipynb b/chapter02_mathematical-building-blocks.ipynb index e298216..7a9acbd 100644 --- a/chapter02_mathematical-building-blocks.ipynb +++ b/chapter02_mathematical-building-blocks.ipynb @@ -1135,7 +1135,7 @@ }, "outputs": [], "source": [ - "model = models.Sequential([\n", + "model = keras.Sequential([\n", " layers.Dense(512, activation=\"relu\"),\n", " layers.Dense(10, activation=\"softmax\")\n", "])" @@ -1280,12 +1280,16 @@ }, "outputs": [], "source": [ + "import math\n", + "\n", "class BatchGenerator:\n", " def __init__(self, images, labels, batch_size=128):\n", + " assert len(images) == len(labels)\n", " self.index = 0\n", " self.images = images\n", " self.labels = labels\n", " self.batch_size = batch_size\n", + " self.num_batches = math.ceil(len(images) / batch_size)\n", "\n", " def next(self):\n", " images = self.images[self.index : self.index + self.batch_size]\n", @@ -1374,7 +1378,7 @@ " for epoch_counter in range(epochs):\n", " print(f\"Epoch {epoch_counter}\")\n", " batch_generator = BatchGenerator(images, labels)\n", - " for batch_counter in range(len(images) // batch_size):\n", + " for batch_counter in range(batch_generator.num_batches):\n", " images_batch, labels_batch = batch_generator.next()\n", " loss = one_training_step(model, images_batch, labels_batch)\n", " if batch_counter % 100 == 0:\n", diff --git a/chapter08_intro-to-dl-for-computer-vision.ipynb b/chapter08_intro-to-dl-for-computer-vision.ipynb index 319c6ee..aff4f5b 100644 --- a/chapter08_intro-to-dl-for-computer-vision.ipynb +++ b/chapter08_intro-to-dl-for-computer-vision.ipynb @@ -341,7 +341,7 @@ "from tensorflow.keras import layers\n", "\n", "inputs = keras.Input(shape=(180, 180, 3))\n", - "x = layers.experimental.preprocessing.Rescaling(1./255)(inputs)\n", + "x = layers.Rescaling(1./255)(inputs)\n", "x = layers.Conv2D(filters=32, kernel_size=3, activation=\"relu\")(x)\n", "x = layers.MaxPooling2D(pool_size=2)(x)\n", "x = 
layers.Conv2D(filters=64, kernel_size=3, activation=\"relu\")(x)\n", @@ -415,7 +415,7 @@ }, "outputs": [], "source": [ - "from tensorflow.keras.preprocessing import image_dataset_from_directory\n", + "from tensorflow.keras.utils import image_dataset_from_directory\n", "\n", "train_dataset = image_dataset_from_directory(\n", " new_base_dir / \"train\",\n", @@ -663,9 +663,9 @@ "source": [ "data_augmentation = keras.Sequential(\n", " [\n", - " layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n", - " layers.experimental.preprocessing.RandomRotation(0.1),\n", - " layers.experimental.preprocessing.RandomZoom(0.2),\n", + " layers.RandomFlip(\"horizontal\"),\n", + " layers.RandomRotation(0.1),\n", + " layers.RandomZoom(0.2),\n", " ]\n", ")" ] @@ -715,7 +715,7 @@ "source": [ "inputs = keras.Input(shape=(180, 180, 3))\n", "x = data_augmentation(inputs)\n", - "x = layers.experimental.preprocessing.Rescaling(1./255)(x)\n", + "x = layers.Rescaling(1./255)(x)\n", "x = layers.Conv2D(filters=32, kernel_size=3, activation=\"relu\")(x)\n", "x = layers.MaxPooling2D(pool_size=2)(x)\n", "x = layers.Conv2D(filters=64, kernel_size=3, activation=\"relu\")(x)\n", @@ -1055,9 +1055,9 @@ "source": [ "data_augmentation = keras.Sequential(\n", " [\n", - " layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n", - " layers.experimental.preprocessing.RandomRotation(0.1),\n", - " layers.experimental.preprocessing.RandomZoom(0.2),\n", + " layers.RandomFlip(\"horizontal\"),\n", + " layers.RandomRotation(0.1),\n", + " layers.RandomZoom(0.2),\n", " ]\n", ")\n", "\n", diff --git a/chapter09_part01_image-segmentation.ipynb b/chapter09_part01_image-segmentation.ipynb index cd001dc..460ca94 100644 --- a/chapter09_part01_image-segmentation.ipynb +++ b/chapter09_part01_image-segmentation.ipynb @@ -82,7 +82,7 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "from tensorflow.keras.preprocessing.image import load_img, img_to_array\n", + "from tensorflow.keras.utils 
import load_img, img_to_array\n", "\n", "plt.axis(\"off\")\n", "plt.imshow(load_img(input_img_paths[9]))" @@ -157,7 +157,7 @@ "\n", "def get_model(img_size, num_classes):\n", " inputs = keras.Input(shape=img_size + (3,))\n", - " x = layers.experimental.preprocessing.Rescaling(1./255)(inputs)\n", + " x = layers.Rescaling(1./255)(inputs)\n", "\n", " x = layers.Conv2D(64, 3, strides=2, activation=\"relu\", padding=\"same\")(x)\n", " x = layers.Conv2D(64, 3, activation=\"relu\", padding=\"same\")(x)\n", @@ -230,7 +230,7 @@ }, "outputs": [], "source": [ - "from tensorflow.keras.preprocessing.image import array_to_img\n", + "from tensorflow.keras.utils import array_to_img\n", "\n", "model = keras.models.load_model(\"oxford_segmentation.keras\")\n", "\n", diff --git a/chapter09_part02_modern-convnet-architecture-patterns.ipynb b/chapter09_part02_modern-convnet-architecture-patterns.ipynb index 8d5fd7e..b8136b0 100644 --- a/chapter09_part02_modern-convnet-architecture-patterns.ipynb +++ b/chapter09_part02_modern-convnet-architecture-patterns.ipynb @@ -99,7 +99,7 @@ "outputs": [], "source": [ "inputs = keras.Input(shape=(32, 32, 3))\n", - "x = layers.experimental.preprocessing.Rescaling(1./255)(inputs)\n", + "x = layers.Rescaling(1./255)(inputs)\n", "\n", "def residual_block(x, filters, pooling=False):\n", " residual = x\n", @@ -186,7 +186,7 @@ "outputs": [], "source": [ "import os, shutil, pathlib\n", - "from tensorflow.keras.preprocessing import image_dataset_from_directory\n", + "from tensorflow.keras.utils import image_dataset_from_directory\n", "\n", "original_dir = pathlib.Path(\"train\")\n", "new_base_dir = pathlib.Path(\"cats_vs_dogs_small\")\n", @@ -228,9 +228,9 @@ "source": [ "data_augmentation = keras.Sequential(\n", " [\n", - " layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n", - " layers.experimental.preprocessing.RandomRotation(0.1),\n", - " layers.experimental.preprocessing.RandomZoom(0.2),\n", + " layers.RandomFlip(\"horizontal\"),\n", + " 
layers.RandomRotation(0.1),\n", + " layers.RandomZoom(0.2),\n", " ]\n", ")" ] @@ -246,7 +246,7 @@ "inputs = keras.Input(shape=(180, 180, 3))\n", "x = data_augmentation(inputs)\n", "\n", - "x = layers.experimental.preprocessing.Rescaling(1./255)(x)\n", + "x = layers.Rescaling(1./255)(x)\n", "x = layers.Conv2D(filters=32, kernel_size=5, use_bias=False)(x)\n", "\n", "for size in [32, 64, 128, 256, 512]:\n", diff --git a/chapter09_part03_interpreting-what-convnets-learn.ipynb b/chapter09_part03_interpreting-what-convnets-learn.ipynb index 10af311..9f9c4ab 100644 --- a/chapter09_part03_interpreting-what-convnets-learn.ipynb +++ b/chapter09_part03_interpreting-what-convnets-learn.ipynb @@ -79,9 +79,9 @@ " origin=\"https://img-datasets.s3.amazonaws.com/cat.jpg\")\n", "\n", "def get_img_array(img_path, target_size):\n", - " img = keras.preprocessing.image.load_img(\n", + " img = keras.utils.load_img(\n", " img_path, target_size=target_size)\n", - " array = keras.preprocessing.image.img_to_array(img)\n", + " array = keras.utils.img_to_array(img)\n", " array = np.expand_dims(array, axis=0)\n", " return array\n", "\n", @@ -493,7 +493,7 @@ " :,\n", " ] = image\n", "\n", - "keras.preprocessing.image.save_img(\n", + "keras.utils.save_img(\n", " f\"filters_for_layer_{layer_name}.png\", stitched_filters)" ] }, @@ -548,8 +548,8 @@ " origin=\"https://img-datasets.s3.amazonaws.com/elephant.jpg\")\n", "\n", "def get_img_array(img_path, target_size):\n", - " img = keras.preprocessing.image.load_img(img_path, target_size=target_size)\n", - " array = keras.preprocessing.image.img_to_array(img)\n", + " img = keras.utils.load_img(img_path, target_size=target_size)\n", + " array = keras.utils.img_to_array(img)\n", " array = np.expand_dims(array, axis=0)\n", " array = keras.applications.xception.preprocess_input(array)\n", " return array\n", @@ -724,8 +724,8 @@ "source": [ "import matplotlib.cm as cm\n", "\n", - "img = keras.preprocessing.image.load_img(img_path)\n", - "img = 
keras.preprocessing.image.img_to_array(img)\n", + "img = keras.utils.load_img(img_path)\n", + "img = keras.utils.img_to_array(img)\n", "\n", "heatmap = np.uint8(255 * heatmap)\n", "\n", @@ -733,12 +733,12 @@ "jet_colors = jet(np.arange(256))[:, :3]\n", "jet_heatmap = jet_colors[heatmap]\n", "\n", - "jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)\n", + "jet_heatmap = keras.utils.array_to_img(jet_heatmap)\n", "jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))\n", - "jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)\n", + "jet_heatmap = keras.utils.img_to_array(jet_heatmap)\n", "\n", "superimposed_img = jet_heatmap * 0.4 + img\n", - "superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)\n", + "superimposed_img = keras.utils.array_to_img(superimposed_img)\n", "\n", "save_path = \"elephant_cam.jpg\"\n", "superimposed_img.save(save_path)" diff --git a/chapter10_dl-for-timeseries.ipynb b/chapter10_dl-for-timeseries.ipynb index 8d3c8d9..24ecd83 100644 --- a/chapter10_dl-for-timeseries.ipynb +++ b/chapter10_dl-for-timeseries.ipynb @@ -213,7 +213,7 @@ "import numpy as np\n", "from tensorflow import keras\n", "int_sequence = np.arange(10)\n", - "dummy_dataset = keras.preprocessing.timeseries_dataset_from_array(\n", + "dummy_dataset = keras.utils.timeseries_dataset_from_array(\n", " data=int_sequence[:-3],\n", " targets=int_sequence[3:],\n", " sequence_length=3,\n", @@ -247,7 +247,7 @@ "delay = sampling_rate * (sequence_length + 24 - 1)\n", "batch_size = 256\n", "\n", - "train_dataset = keras.preprocessing.timeseries_dataset_from_array(\n", + "train_dataset = keras.utils.timeseries_dataset_from_array(\n", " raw_data[:-delay],\n", " targets=temperature[delay:],\n", " sampling_rate=sampling_rate,\n", @@ -257,7 +257,7 @@ " start_index=0,\n", " end_index=num_train_samples)\n", "\n", - "val_dataset = keras.preprocessing.timeseries_dataset_from_array(\n", + "val_dataset = 
keras.utils.timeseries_dataset_from_array(\n", " raw_data[:-delay],\n", " targets=temperature[delay:],\n", " sampling_rate=sampling_rate,\n", @@ -267,7 +267,7 @@ " start_index=num_train_samples,\n", " end_index=num_train_samples + num_val_samples)\n", "\n", - "test_dataset = keras.preprocessing.timeseries_dataset_from_array(\n", + "test_dataset = keras.utils.timeseries_dataset_from_array(\n", " raw_data[:-delay],\n", " targets=temperature[delay:],\n", " sampling_rate=sampling_rate,\n", diff --git a/chapter11_part01_introduction.ipynb b/chapter11_part01_introduction.ipynb index c74ab56..1c9708d 100644 --- a/chapter11_part01_introduction.ipynb +++ b/chapter11_part01_introduction.ipynb @@ -153,7 +153,7 @@ }, "outputs": [], "source": [ - "from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n", + "from tensorflow.keras.layers import TextVectorization\n", "text_vectorization = TextVectorization(\n", " output_mode=\"int\",\n", ")" @@ -336,13 +336,13 @@ "from tensorflow import keras\n", "batch_size = 32\n", "\n", - "train_ds = keras.preprocessing.text_dataset_from_directory(\n", + "train_ds = keras.utils.text_dataset_from_directory(\n", " \"aclImdb/train\", batch_size=batch_size\n", ")\n", - "val_ds = keras.preprocessing.text_dataset_from_directory(\n", + "val_ds = keras.utils.text_dataset_from_directory(\n", " \"aclImdb/val\", batch_size=batch_size\n", ")\n", - "test_ds = keras.preprocessing.text_dataset_from_directory(\n", + "test_ds = keras.utils.text_dataset_from_directory(\n", " \"aclImdb/test\", batch_size=batch_size\n", ")" ] diff --git a/chapter11_part02_sequence-models.ipynb b/chapter11_part02_sequence-models.ipynb index 1014db5..7bc8912 100644 --- a/chapter11_part02_sequence-models.ipynb +++ b/chapter11_part02_sequence-models.ipynb @@ -82,13 +82,13 @@ " shutil.move(train_dir / category / fname,\n", " val_dir / category / fname)\n", "\n", - "train_ds = keras.preprocessing.text_dataset_from_directory(\n", + "train_ds = 
keras.utils.text_dataset_from_directory(\n", " \"aclImdb/train\", batch_size=batch_size\n", ")\n", - "val_ds = keras.preprocessing.text_dataset_from_directory(\n", + "val_ds = keras.utils.text_dataset_from_directory(\n", " \"aclImdb/val\", batch_size=batch_size\n", ")\n", - "test_ds = keras.preprocessing.text_dataset_from_directory(\n", + "test_ds = keras.utils.text_dataset_from_directory(\n", " \"aclImdb/test\", batch_size=batch_size\n", ")\n", "text_only_train_ds = train_ds.map(lambda x, y: x)" @@ -111,10 +111,11 @@ }, "outputs": [], "source": [ - "from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n", + "from tensorflow.keras import layers\n", + "\n", "max_length = 600\n", "max_tokens = 20000\n", - "text_vectorization = TextVectorization(\n", + "text_vectorization = layers.TextVectorization(\n", " max_tokens=max_tokens,\n", " output_mode=\"int\",\n", " output_sequence_length=max_length,\n", @@ -144,7 +145,6 @@ "outputs": [], "source": [ "import tensorflow as tf\n", - "from tensorflow.keras import layers\n", "inputs = keras.Input(shape=(None,), dtype=\"int64\")\n", "embedded = tf.one_hot(inputs, depth=max_tokens)\n", "x = layers.Bidirectional(layers.LSTM(32))(embedded)\n", diff --git a/chapter11_part03_transformer.ipynb b/chapter11_part03_transformer.ipynb index d4ca581..397e92e 100644 --- a/chapter11_part03_transformer.ipynb +++ b/chapter11_part03_transformer.ipynb @@ -109,13 +109,13 @@ " shutil.move(train_dir / category / fname,\n", " val_dir / category / fname)\n", "\n", - "train_ds = keras.preprocessing.text_dataset_from_directory(\n", + "train_ds = keras.utils.text_dataset_from_directory(\n", " \"aclImdb/train\", batch_size=batch_size\n", ")\n", - "val_ds = keras.preprocessing.text_dataset_from_directory(\n", + "val_ds = keras.utils.text_dataset_from_directory(\n", " \"aclImdb/val\", batch_size=batch_size\n", ")\n", - "test_ds = keras.preprocessing.text_dataset_from_directory(\n", + "test_ds = 
keras.utils.text_dataset_from_directory(\n", " \"aclImdb/test\", batch_size=batch_size\n", ")\n", "text_only_train_ds = train_ds.map(lambda x, y: x)" @@ -138,10 +138,11 @@ }, "outputs": [], "source": [ - "from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n", + "from tensorflow.keras import layers\n", + "\n", "max_length = 600\n", "max_tokens = 20000\n", - "text_vectorization = TextVectorization(\n", + "text_vectorization = layers.TextVectorization(\n", " max_tokens=max_tokens,\n", " output_mode=\"int\",\n", " output_sequence_length=max_length,\n", diff --git a/chapter11_part04_sequence-to-sequence-learning.ipynb b/chapter11_part04_sequence-to-sequence-learning.ipynb index 22a41d7..388e64d 100644 --- a/chapter11_part04_sequence-to-sequence-learning.ipynb +++ b/chapter11_part04_sequence-to-sequence-learning.ipynb @@ -103,7 +103,6 @@ }, "outputs": [], "source": [ - "from keras.layers.experimental.preprocessing import TextVectorization\n", "import tensorflow as tf\n", "import string\n", "import re\n", @@ -120,12 +119,12 @@ "vocab_size = 15000\n", "sequence_length = 20\n", "\n", - "source_vectorization = TextVectorization(\n", + "source_vectorization = layers.TextVectorization(\n", " max_tokens=vocab_size,\n", " output_mode=\"int\",\n", " output_sequence_length=sequence_length,\n", ")\n", - "target_vectorization = TextVectorization(\n", + "target_vectorization = layers.TextVectorization(\n", " max_tokens=vocab_size,\n", " output_mode=\"int\",\n", " output_sequence_length=sequence_length + 1,\n", diff --git a/chapter12_part01_text-generation.ipynb b/chapter12_part01_text-generation.ipynb index 19003e7..6fa2f17 100644 --- a/chapter12_part01_text-generation.ipynb +++ b/chapter12_part01_text-generation.ipynb @@ -136,7 +136,7 @@ "source": [ "import tensorflow as tf\n", "from tensorflow import keras\n", - "dataset = keras.preprocessing.text_dataset_from_directory(\n", + "dataset = keras.utils.text_dataset_from_directory(\n", " 
directory=\"aclImdb\", label_mode=None, batch_size=256)\n", "dataset = dataset.map(lambda x: tf.strings.regex_replace(x, \"
\", \" \"))" ] @@ -158,7 +158,7 @@ }, "outputs": [], "source": [ - "from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n", + "from tensorflow.keras.layers import TextVectorization\n", "\n", "sequence_length = 100\n", "vocab_size = 15000\n", diff --git a/chapter12_part02_deep-dream.ipynb b/chapter12_part02_deep-dream.ipynb index 284584e..4827f15 100644 --- a/chapter12_part02_deep-dream.ipynb +++ b/chapter12_part02_deep-dream.ipynb @@ -42,7 +42,7 @@ " \"coast.jpg\", origin=\"https://img-datasets.s3.amazonaws.com/coast.jpg\")\n", "\n", "plt.axis(\"off\")\n", - "plt.imshow(keras.preprocessing.image.load_img(base_image_path))" + "plt.imshow(keras.utils.load_img(base_image_path))" ] }, { @@ -154,8 +154,8 @@ "import numpy as np\n", "\n", "def preprocess_image(image_path):\n", - " img = keras.preprocessing.image.load_img(image_path)\n", - " img = keras.preprocessing.image.img_to_array(img)\n", + " img = keras.utils.load_img(image_path)\n", + " img = keras.utils.img_to_array(img)\n", " img = np.expand_dims(img, axis=0)\n", " img = keras.applications.inception_v3.preprocess_input(img)\n", " return img\n", @@ -201,7 +201,7 @@ " img += lost_detail\n", " shrunk_original_img = tf.image.resize(original_img, shape)\n", "\n", - "keras.preprocessing.image.save_img(\"dream.png\", deprocess_image(img.numpy()))" + "keras.utils.save_img(\"dream.png\", deprocess_image(img.numpy()))" ] }, { diff --git a/chapter12_part03_neural-style-transfer.ipynb b/chapter12_part03_neural-style-transfer.ipynb index 121620c..d5ebcb0 100644 --- a/chapter12_part03_neural-style-transfer.ipynb +++ b/chapter12_part03_neural-style-transfer.ipynb @@ -69,7 +69,7 @@ "style_reference_image_path = keras.utils.get_file(\n", " \"starry_night.jpg\", origin=\"https://img-datasets.s3.amazonaws.com/starry_night.jpg\")\n", "\n", - "original_width, original_height = keras.preprocessing.image.load_img(base_image_path).size\n", + "original_width, original_height = 
keras.utils.load_img(base_image_path).size\n", "img_height = 400\n", "img_width = round(original_width * img_height / original_height)" ] @@ -94,9 +94,9 @@ "import numpy as np\n", "\n", "def preprocess_image(image_path):\n", - "    img = keras.preprocessing.image.load_img(\n", + "    img = keras.utils.load_img(\n", "        image_path, target_size=(img_height, img_width))\n", - "    img = keras.preprocessing.image.img_to_array(img)\n", + "    img = keras.utils.img_to_array(img)\n", "    img = np.expand_dims(img, axis=0)\n", "    img = keras.applications.vgg19.preprocess_input(img)\n", "    return img\n", @@ -312,7 +312,7 @@ "        print(f\"Iteration {i}: loss={loss:.2f}\")\n", "        img = deprocess_image(combination_image.numpy())\n", "        fname = f\"combination_image_at_iteration_{i}.png\"\n", - "        keras.preprocessing.image.save_img(fname, img)" + "        keras.utils.save_img(fname, img)" ] }, { diff --git a/chapter12_part05_gans.ipynb b/chapter12_part05_gans.ipynb index e4b3bc4..1e5574b 100644 --- a/chapter12_part05_gans.ipynb +++ b/chapter12_part05_gans.ipynb @@ -85,7 +85,7 @@ "outputs": [], "source": [ "from tensorflow import keras\n", - "dataset = keras.preprocessing.image_dataset_from_directory(\n", + "dataset = keras.utils.image_dataset_from_directory(\n", "    \"celeba_gan\",\n", "    label_mode=None,\n", "    image_size=(64, 64),\n", @@ -357,7 +357,7 @@ "        generated_images *= 255\n", "        generated_images.numpy()\n", "        for i in range(self.num_img):\n", - "            img = keras.preprocessing.image.array_to_img(generated_images[i])\n", + "            img = keras.utils.array_to_img(generated_images[i])\n", "            img.save(f\"generated_img_{epoch:03d}_{i}.png\")" ] },