{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "kera_multi_gpu.ipynb",
"version": "0.3.2",
"provenance": [],
"collapsed_sections": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"[View in Colaboratory](https://colab.research.google.com/github/gmihaila/machine_learning_toolbox/blob/master/kera_multi_gpu.ipynb)"
]
},
{
"metadata": {
"id": "u1Nvcdlzi1cS",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"# Multi GPU Keras training\n",
"## This is for 2 or more gpus, by default keras uses 1 gpu if installed\n"
]
},
{
"metadata": {
"id": "xndT2_2pgxg5",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 35
},
"outputId": "0cdbf7b2-5fd0-4e3b-e657-19d7339721f8"
},
"cell_type": "code",
"source": [
"import os, sys\n",
"import numpy as np\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Embedding, LSTM\n",
"\n",
"os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2,3\" #<------ CHECK YOUR MLP EMAIL\n",
"\n",
"from keras.utils.training_utils import multi_gpu_model #<------------------Esential for multi GPUs usage\n",
"\n",
"\n",
"# BUILD MODEL FUNCTION\n",
"\n",
"def BuildToyModel():\n",
" # generate some dummy data\n",
" n_examples = 1000\n",
" n_features = 100\n",
"\n",
" x_train = np.random.random((n_examples, n_features))\n",
" x_train = np.round(x_train, 2)\n",
" x_train *= 100\n",
" x_train = np.array(x_train, dtype=int)\n",
"\n",
" n_words = np.max(x_train)\n",
"\n",
" y_train = np.random.random(n_examples)\n",
"\n",
" # number of units in LSTM\n",
" n_units = 256\n",
" # numbe rof words\n",
" n_words = 100500 #vocabulary size\n",
" size_emb = 300 #size of embedding has to match\n",
" size_seq = 50\n",
"\n",
" embedding_matrix = np.random.random((n_words+2, size_emb))\n",
"\n",
" # build model\n",
" model = Sequential()\n",
" model.add(Embedding(input_dim=(n_words+2), output_dim=size_emb, weights=[embedding_matrix],\n",
" mask_zero=False, trainable=False))\n",
" # LSTM LAYER/S\n",
" model.add(LSTM(256, dropout=0.2, recurrent_dropout=0.2))\n",
"\n",
" # TREAT AS REGRESSION PROBLEM\n",
" model.add(Dense(1, activation='sigmoid'))\n",
"\n",
" return model, x_train, y_train\n",
" "
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
],
"name": "stderr"
}
]
},
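{
"metadata": {},
"cell_type": "markdown",
"source": [
"### Sanity check: which GPUs does the backend see?\n",
"A minimal sketch, assuming the TensorFlow 1.x backend used by Keras here: `device_lib.list_local_devices()` lists the devices visible after `CUDA_VISIBLE_DEVICES` is set. With \"2,3\" exposed, two GPU entries should appear, renumbered as `/device:GPU:0` and `/device:GPU:1`."
]
},
{
"metadata": {},
"cell_type": "code",
"source": [
"# Sketch: list the devices the TensorFlow backend can see.\n",
"# With CUDA_VISIBLE_DEVICES=\"2,3\", two GPU entries should show up, renumbered 0 and 1.\n",
"from tensorflow.python.client import device_lib\n",
"\n",
"print([d.name for d in device_lib.list_local_devices()])"
],
"execution_count": 0,
"outputs": []
},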
{
"metadata": {
"id": "Y336YVUIh1xB",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Build model\n",
"model, x_train, y_train = BuildToyModel()\n",
"\n",
"# Add how many GPUs you want/have:wq\n",
"model = multi_gpu_model(model, gpus=1) #<------------------Esential Part to run on GPUs\n",
"\n",
"# Compile model\n",
"model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])\n",
"\n",
"# model summary\n",
"print(model.summary()) #<------------------See lambda layers distributed on each GPU\n",
"\n",
"# batches\n",
"n_batch = 32 #<------------------make sure your batch is not too small\n",
"# epochs\n",
"n_epoch = 10\n",
"\n",
"# train model\n",
"model.fit(x_train, y_train, epochs=n_epoch, batch_size=n_batch, verbose=1)"
],
"execution_count": 0,
"outputs": []
}
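,
{
"metadata": {},
"cell_type": "markdown",
"source": [
"### Saving the multi-GPU model\n",
"Per the Keras docs for `multi_gpu_model`, save the original template model (the one passed into `multi_gpu_model`) rather than the parallel wrapper, since the two share weights. A minimal sketch of that pattern; `template_model`, `parallel_model`, and the weights file name are illustrative, not taken from the cells above."
]
},
{
"metadata": {},
"cell_type": "code",
"source": [
"# Sketch: keep the single-GPU template model and save its weights after training.\n",
"# template_model / parallel_model are hypothetical names; the parallel model\n",
"# returned by multi_gpu_model shares weights with the template it wraps.\n",
"template_model, x_train, y_train = BuildToyModel()\n",
"parallel_model = multi_gpu_model(template_model, gpus=2)\n",
"parallel_model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])\n",
"parallel_model.fit(x_train, y_train, epochs=1, batch_size=32, verbose=1)\n",
"\n",
"# Save / reload through the template model, not the parallel wrapper.\n",
"template_model.save_weights('toy_model_weights.h5')"
],
"execution_count": 0,
"outputs": []
}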
]
}