mirror of
https://github.com/gmihaila/ml_things.git
synced 2021-10-04 01:29:04 +03:00
247 lines
6.6 KiB
Plaintext
{
|
|
"nbformat": 4,
|
|
"nbformat_minor": 0,
|
|
"metadata": {
|
|
"colab": {
|
|
"name": "check_gpu.ipynb",
|
|
"version": "0.3.2",
|
|
"provenance": [],
|
|
"collapsed_sections": []
|
|
},
|
|
"kernelspec": {
|
|
"name": "python2",
|
|
"display_name": "Python 2"
|
|
},
|
|
"accelerator": "GPU"
|
|
},
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"id": "view-in-github",
|
|
"colab_type": "text"
|
|
},
|
|
"source": [
|
|
"[View in Colaboratory](https://colab.research.google.com/github/gmihaila/deep_learning_toolbox/blob/master/check_gpu.ipynb)"
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "YzQz3JOiDHLB",
|
|
"colab_type": "text"
|
|
},
|
|
"cell_type": "markdown",
|
|
"source": [
|
|
"### Check GPU\n",
|
|
"\n",
|
|
"by GeorgeM."
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "gu_mrI-yDGM7",
|
|
"colab_type": "code",
|
|
"colab": {
|
|
"base_uri": "https://localhost:8080/",
|
|
"height": 318
|
|
},
|
|
"outputId": "f3bfb4c4-d2f5-4970-c2c6-128cbe7c6c3c"
|
|
},
|
|
"cell_type": "code",
|
|
"source": [
"# Enumerate every compute device (CPU and GPU) visible to this kernel.\n",
"from tensorflow.python.client import device_lib\n",
"\n",
"local_devices = device_lib.list_local_devices()\n",
"print(local_devices)\n"
],
|
|
"execution_count": 11,
|
|
"outputs": [
|
|
{
|
|
"output_type": "stream",
|
|
"text": [
|
|
"[name: \"/device:CPU:0\"\n",
|
|
"device_type: \"CPU\"\n",
|
|
"memory_limit: 268435456\n",
|
|
"locality {\n",
|
|
"}\n",
|
|
"incarnation: 5511350974177550019\n",
|
|
", name: \"/device:GPU:0\"\n",
|
|
"device_type: \"GPU\"\n",
|
|
"memory_limit: 254017536\n",
|
|
"locality {\n",
|
|
" bus_id: 1\n",
|
|
" links {\n",
|
|
" }\n",
|
|
"}\n",
|
|
"incarnation: 5216852088915461449\n",
|
|
"physical_device_desc: \"device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7\"\n",
|
|
"]\n"
|
|
],
|
|
"name": "stdout"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "H5uKJl7qDWK7",
|
|
"colab_type": "code",
|
|
"colab": {
|
|
"base_uri": "https://localhost:8080/",
|
|
"height": 34
|
|
},
|
|
"outputId": "ff62a43b-3a33-4009-9379-70c6fea878dc"
|
|
},
|
|
"cell_type": "code",
|
|
"source": [
"import tensorflow as tf\n",
"\n",
"# Query the default GPU name once and reuse it, instead of calling\n",
"# tf.test.gpu_device_name() twice (once for the check, once for the print).\n",
"gpu_name = tf.test.gpu_device_name()\n",
"if gpu_name:\n",
"    print('Default GPU Device: {}'.format(gpu_name))\n",
"else:\n",
"    print(\"Please install GPU version of TF\")"
],
|
|
"execution_count": 1,
|
|
"outputs": [
|
|
{
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Default GPU Device: /device:GPU:0\n"
|
|
],
|
|
"name": "stdout"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "Y6jGfRdVDiXy",
|
|
"colab_type": "code",
|
|
"colab": {
|
|
"base_uri": "https://localhost:8080/",
|
|
"height": 52
|
|
},
|
|
"outputId": "76c7f78c-4fce-4ace-c5ff-fed09a5e270a"
|
|
},
|
|
"cell_type": "code",
|
|
"source": [
"import tensorflow as tf\n",
"\n",
"# Bind the session to its own name: the original `tf = tf.Session(...)`\n",
"# shadowed the imported module with a Session object, so any later use of\n",
"# the TensorFlow API in this kernel only worked because other cells\n",
"# happened to re-import tf (hidden-state bug).\n",
"sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n",
"sess.list_devices()"
],
|
|
"execution_count": 5,
|
|
"outputs": [
|
|
{
|
|
"output_type": "execute_result",
|
|
"data": {
|
|
"text/plain": [
|
|
"[_DeviceAttributes(/job:localhost/replica:0/task:0/device:CPU:0, CPU, 268435456),\n",
|
|
" _DeviceAttributes(/job:localhost/replica:0/task:0/device:GPU:0, GPU, 254017536)]"
|
|
]
|
|
},
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"execution_count": 5
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "PcNuzDe9EC_0",
|
|
"colab_type": "code",
|
|
"colab": {
|
|
"base_uri": "https://localhost:8080/",
|
|
"height": 52
|
|
},
|
|
"outputId": "74727f83-fa7f-42ba-d17d-edf51587ac10"
|
|
},
|
|
"cell_type": "code",
|
|
"source": [
"import tensorflow as tf\n",
"\n",
"# Build a tiny graph: a (2x3) matrix times b (3x2) -> c (2x2).\n",
"a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n",
"b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n",
"c = tf.matmul(a, b)\n",
"\n",
"# log_device_placement=True makes the runtime report which device\n",
"# executes each op, so the GPU placement is visible in the session log.\n",
"config = tf.ConfigProto(log_device_placement=True)\n",
"sess = tf.Session(config=config)\n",
"\n",
"print(sess.run(c))"
],
|
|
"execution_count": 6,
|
|
"outputs": [
|
|
{
|
|
"output_type": "stream",
|
|
"text": [
|
|
"[[22. 28.]\n",
|
|
" [49. 64.]]\n"
|
|
],
|
|
"name": "stdout"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "_hPXFuAuEM8a",
|
|
"colab_type": "text"
|
|
},
|
|
"cell_type": "markdown",
|
|
"source": [
|
|
"You should see output similar to the following (the device name and PCI bus id will vary with the GPU Colab assigns):\n",
|
|
"\n",
|
|
"Device mapping:\n",
|
|
"/job:localhost/replica:0/task:0/device:GPU:0 -> device: 0, name: Tesla K40c, pci bus\n",
|
|
"id: 0000:05:00.0\n",
|
|
"b: /job:localhost/replica:0/task:0/device:GPU:0\n",
|
|
"a: /job:localhost/replica:0/task:0/device:GPU:0\n",
|
|
"MatMul: /job:localhost/replica:0/task:0/device:GPU:0\n",
|
|
"[[ 22. 28.]\n",
|
|
" [ 49. 64.]]"
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "F1Vuxp3oDyOb",
|
|
"colab_type": "text"
|
|
},
|
|
"cell_type": "markdown",
|
|
"source": [
|
|
"### Activate TF for GPU"
|
|
]
|
|
},
|
|
{
|
|
"metadata": {
|
|
"id": "l1qZ96H2D0zw",
|
|
"colab_type": "code",
|
|
"colab": {
|
|
"base_uri": "https://localhost:8080/",
|
|
"height": 52
|
|
},
|
|
"outputId": "1eba89f8-556f-4ee9-fae9-83487a304871"
|
|
},
|
|
"cell_type": "code",
|
|
"source": [
"# Import here so this cell survives Restart & Run All on its own;\n",
"# the original relied on `tf` leaking in from an earlier cell.\n",
"import tensorflow as tf\n",
"\n",
"# Pin graph construction to the first GPU explicitly.\n",
"with tf.device('/gpu:0'):\n",
"    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n",
"    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n",
"    c = tf.matmul(a, b)\n",
"\n",
"# Log device placement so the pinning above can be verified in the output.\n",
"sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n",
"print(sess.run(c))"
],
|
|
"execution_count": 17,
|
|
"outputs": [
|
|
{
|
|
"output_type": "stream",
|
|
"text": [
|
|
"[[22. 28.]\n",
|
|
" [49. 64.]]\n"
|
|
],
|
|
"name": "stdout"
|
|
}
|
|
]
|
|
}
|
|
]
|
|
} |