simplysameer333 / MachineLearning


text_classification.py #17

Open simplysameer333 opened 5 years ago

simplysameer333 commented 5 years ago

```python
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds

print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.test.is_gpu_available() else "NOT AVAILABLE")
```

```python
train_validation_split = tfds.Split.TRAIN.subsplit([8, 2])

(train_data, validation_data), test_data = tfds.load(
    name="imdb_reviews",
    split=(train_validation_split, tfds.Split.TEST),
    as_supervised=True)
```
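Note: `tfds.Split.TRAIN.subsplit` only exists in older `tensorflow_datasets` releases; later versions dropped the subsplit API in favour of split strings. If the snippet above fails with an `AttributeError`, the following sketch (assuming a tfds version that supports the slicing syntax) expresses the same 80/20 train/validation split:

```python
# Equivalent split written with the tfds slicing syntax (assumed to be
# available in tensorflow_datasets releases that removed Split.subsplit):
# 80% of the training set for training, the remaining 20% for validation.
train_data, validation_data, test_data = tfds.load(
    name="imdb_reviews",
    split=('train[:80%]', 'train[80%:]', 'test'),
    as_supervised=True)
```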

```python
train_examples_batch, train_labels_batch = next(iter(train_data.batch(5)))

print("train_examples_batch: ", train_examples_batch)
print("train_labels_batch: ", train_labels_batch)
```

```python
embedding = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[], dtype=tf.string,
                           trainable=True)
hub_layer(train_examples_batch[:3])
```
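The gnews-swivel-20dim module embeds each whole review into a single 20-dimensional vector, so the call above should return a float tensor of shape (3, 20). A quick sanity check, not part of the original script:

```python
# Hypothetical sanity check: each of the 3 sample reviews is mapped to one
# 20-dimensional embedding vector by the hub layer.
print(hub_layer(train_examples_batch[:3]).shape)  # expected: (3, 20)
```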

```python
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

model.summary()

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
```

```python
history = model.fit(train_data.shuffle(10000).batch(512),
                    epochs=20,
                    validation_data=validation_data.batch(512),
                    verbose=1)
```
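The `History` object returned by `model.fit` records the per-epoch metrics, which can be plotted to check for overfitting. A minimal sketch, assuming matplotlib is installed and that the metric keys are `'accuracy'`/`'val_accuracy'` (they may be `'acc'`/`'val_acc'` on some TF versions):

```python
# Optional: visualise the training curves stored in history.history.
# Key names are assumed; print(history.history.keys()) shows the actual ones.
import matplotlib.pyplot as plt

history_dict = history.history
plt.plot(history_dict['accuracy'], label='train accuracy')
plt.plot(history_dict['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()
```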

```python
results = model.evaluate(test_data.batch(512), verbose=0)

for name, value in zip(model.metrics_names, results):
    print("%s: %.3f" % (name, value))
```
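Since the hub layer takes raw strings as input, the trained model can also score new reviews directly; the sigmoid output is the estimated probability that a review is positive. A small usage sketch with made-up example sentences, not in the original script:

```python
# Hypothetical usage example: score a couple of made-up reviews.
sample_reviews = np.array(["The movie was an absolute masterpiece.",
                           "A dull, predictable waste of two hours."])
predictions = model.predict(sample_reviews)
for review, score in zip(sample_reviews, predictions[:, 0]):
    print("%.3f  %s" % (score, review))
```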