TensorFlow 2.0 is coming

TensorFlow 2.0

It has been over three years since TensorFlow was first released (November 2015).
TF 2.0 puts productivity and convenience first and is designed around the four features below (it has become much more user-friendly):
– Unified around eager execution (the default), Keras (the high-level API, merged into core in v1.4, November 2017), and tf.data (input pipelines), aiming for a Pythonic development style

– tf.layers -> tf.keras.layers / tf.Estimator -> tf.keras (premade estimators)
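To see what the mapping means in practice, a 1.x call such as tf.layers.dense corresponds almost one-to-one to a tf.keras.layers.Dense object. A minimal sketch (batch size and layer sizes here are made up for illustration):

import numpy as np
import tensorflow as tf

inputs = np.random.rand(4, 10).astype("float32")          # dummy batch: 4 samples, 10 features
dense = tf.keras.layers.Dense(64, activation=tf.nn.relu)  # replaces tf.layers.dense(inputs, 64, ...)
outputs = dense(inputs)                                   # in 2.0 this runs eagerly and returns values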

pip install tf-nightly-2.0-preview 

  • Easy model building with Keras and eager execution.
    • TensorFlow 2.0 runs with eager execution by default (see the sketch after this list).
  • Robust model deployment in production on any platform.
    • Export models to TensorFlow Serving (HTTP), TensorFlow Lite (mobile), and TensorFlow.js (web).
  • Powerful experimentation for research.
    • AutoGraph and eager execution work together: write plain Python, still get graphs.
  • Simplifying the API by cleaning up deprecated APIs and reducing duplication.
    • This applies across TensorFlow Serving, TensorFlow Lite, TensorFlow.js, TensorFlow Hub, and more.
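A quick sketch of the first and third points in practice (TF 2.0; clipped_square is a made-up example function):

import tensorflow as tf

# Eager by default: ops return concrete values immediately, no Session required
x = tf.constant([[1., 2.], [3., 4.]])
print(tf.matmul(x, x))  # the result prints right away

# AutoGraph: decorate plain Python (with if/for) and it is traced into a graph
@tf.function
def clipped_square(t):
    if tf.reduce_sum(t) > 0:  # Python control flow, converted by AutoGraph
        return t * t
    return tf.zeros_like(t)

print(clipped_square(x))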

Many comparisons with Facebook's PyTorch have been popping up (much like the React vs Angular debates…).
A TF 2.0 conversion tool will be provided (backward compatibility with 1.x is maintained).
It will likely be released around the TensorFlow Dev Summit in March.
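For reference, the 2.0 preview ships with an upgrade script that converts 1.x code (the file names below are placeholders); anything it cannot convert cleanly is rewritten against the tf.compat.v1 compatibility module so old-style calls keep working:

tf_upgrade_v2 --infile model_v1.py --outfile model_v2.py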

<Session vs Eager – Code>

#1. Session Style (TF 1.x: build a static graph, then execute it in a Session)
# Excerpt: assumes W, b, features, labels, predicted, accuracy, the dataset
# iterator, EPOCHS, and x_test are defined earlier.
hypothesis = tf.div(1., 1. + tf.exp(tf.matmul(features, W) + b))  # sigmoid
cost = -tf.reduce_mean(labels * tf.log(hypothesis) + (1 - labels) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(EPOCHS):
        sess.run(iterator.initializer)  # reset the dataset iterator ('iter' shadowed a builtin)
        _, loss_value = sess.run([train, cost])
        if step % 1000 == 0:
            print("Iter: {}, Loss: {:.4f}".format(step, loss_value))
    h, c, a = sess.run([hypothesis, predicted, accuracy])
    print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
    print("\nTest Data : {}, Predict : {}".format(x_test, sess.run(predicted, feed_dict={features: x_test})))

#2. Eager Style (in TF 1.x, eager mode ran roughly 20~30% slower than graph mode)
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tf.enable_eager_execution()

# Excerpt: assumes W, b, optimizer, dataset, and EPOCHS are defined earlier.
def logistic_regression(features):
    hypothesis = tf.div(1., 1. + tf.exp(tf.matmul(features, W) + b))  # sigmoid
    return hypothesis

def loss_fn(hypothesis, features, labels):
    # Binary cross-entropy
    return -tf.reduce_mean(labels * tf.log(hypothesis) + (1 - labels) * tf.log(1 - hypothesis))

def grad(features, labels):
    # The forward pass must run inside the tape so it is recorded for autodiff
    with tf.GradientTape() as tape:
        loss_value = loss_fn(logistic_regression(features), features, labels)
    return tape.gradient(loss_value, [W, b])

for step in range(EPOCHS):
    for features, labels in tfe.Iterator(dataset):
        grads = grad(features, labels)
        optimizer.apply_gradients(grads_and_vars=zip(grads, [W, b]))
        if step % 100 == 0:
            loss = loss_fn(logistic_regression(features), features, labels)
            print("Iter: {}, Loss: {:.4f}".format(step, loss.numpy()))

#3. OOP Style (still 1.x eager: variables grouped in a plain Python class)
class wide_deep_nn():
    def __init__(self, nb_classes):
        super(wide_deep_nn, self).__init__()
        # "Wide" branch parameters: tracked in self.variables below but never
        # used by deep_nn, so their gradients come back as None and are skipped.
        self.W = tf.Variable(tf.random_normal([4, nb_classes]), name='weight')
        self.b = tf.Variable(tf.random_normal([nb_classes]), name='bias')
     
        self.W1 = tf.Variable(tf.random_normal([2, nb_classes]), name='weight1')
        self.b1 = tf.Variable(tf.random_normal([nb_classes]), name='bias1')

        self.W2 = tf.Variable(tf.random_normal([nb_classes, nb_classes]), name='weight2')
        self.b2 = tf.Variable(tf.random_normal([nb_classes]), name='bias2')

        self.W3 = tf.Variable(tf.random_normal([nb_classes, nb_classes]), name='weight3')
        self.b3 = tf.Variable(tf.random_normal([nb_classes]), name='bias3')

        self.W4 = tf.Variable(tf.random_normal([nb_classes, 1]), name='weight4')
        self.b4 = tf.Variable(tf.random_normal([1]), name='bias4')
        self.variables = [self.W, self.b, self.W1, self.b1, self.W2, self.b2, self.W3, self.b3, self.W4, self.b4]
        
    def preprocess_data(self, features, labels):
        features = tf.cast(features, tf.float32)
        labels = tf.cast(labels, tf.float32)
        return features, labels
        
    def deep_nn(self, features):
        layer1 = tf.sigmoid(tf.matmul(features, self.W1) + self.b1)
        layer2 = tf.sigmoid(tf.matmul(layer1, self.W2) + self.b2)
        layer3 = tf.sigmoid(tf.matmul(layer2, self.W3) + self.b3)
        hypothesis = tf.sigmoid(tf.matmul(layer3, self.W4) + self.b4)
        return hypothesis
    
    def loss_fn(self, hypothesis, features, labels):
        # Binary cross-entropy ('features' is unused but kept for a uniform signature)
        cost = -tf.reduce_mean(labels * tf.log(hypothesis) + (1 - labels) * tf.log(1 - hypothesis))
        return cost

    def accuracy_fn(self, hypothesis, labels):
        predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, labels), dtype=tf.float32))
        return accuracy

    def grad(self, features, labels):
        # Run the forward pass inside the tape so it is recorded for autodiff
        with tf.GradientTape() as tape:
            loss_value = self.loss_fn(self.deep_nn(features), features, labels)
        return tape.gradient(loss_value, self.variables)
    
    def fit(self, dataset, EPOCHS=20000, verbose=500):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        for step in range(EPOCHS):
            for features, labels in tfe.Iterator(dataset):
                features, labels = self.preprocess_data(features, labels)
                grads = self.grad(features, labels)
                optimizer.apply_gradients(grads_and_vars=zip(grads, self.variables))
                if step % verbose == 0:
                    loss = self.loss_fn(self.deep_nn(features), features, labels)
                    print("Iter: {}, Loss: {:.4f}".format(step, loss.numpy()))

    def test_model(self, x_data, y_data):
        x_data, y_data = self.preprocess_data(x_data, y_data)
        test_acc = self.accuracy_fn(self.deep_nn(x_data), y_data)
        print("Testset Accuracy: {:.4f}".format(test_acc.numpy()))

#4. Keras Style (the MNIST example from the TF 2.0 overview notebook)
# Load and normalize MNIST so the snippet runs on its own
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(512, activation=tf.nn.relu),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test)

[Reference]
https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md
https://medium.com/tensorflow/whats-coming-in-tensorflow-2-0-d3663832e9b8
Code: https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/beginner/tf2_overview.ipynb
https://motlabs.github.io/2019-01-12/tf2preview-review/
