TensorFlow (37): Convolutional Neural Networks - CIFAR-100 and VGG in Practice
1. Network Structure

The model is a VGG-13-style network: five convolutional units, each consisting of two 3x3 conv layers with ReLU activation followed by a 2x2 max pool, with channel widths 64, 128, 256, 512, and 512. A [b, 32, 32, 3] CIFAR-100 batch is reduced to a [b, 1, 1, 512] feature map, which is then flattened and classified by three fully connected layers (256, 128, 100).
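As a quick sanity check (a minimal standalone sketch, not part of the original code), you can trace how a dummy input shrinks through the five units; each 2x2 max pool halves the spatial resolution, 32 -> 16 -> 8 -> 4 -> 2 -> 1:

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal([1, 32, 32, 3])  # one CIFAR-100-sized image
for filters in [64, 128, 256, 512, 512]:
    # each unit: two same-padded 3x3 convs, then a 2x2 max pool
    x = layers.Conv2D(filters, 3, padding='same', activation='relu')(x)
    x = layers.Conv2D(filters, 3, padding='same', activation='relu')(x)
    x = layers.MaxPool2D(2, strides=2, padding='same')(x)
    print(x.shape)
# (1, 16, 16, 64) -> (1, 8, 8, 128) -> (1, 4, 4, 256)
# -> (1, 2, 2, 512) -> (1, 1, 1, 512)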
2. Code
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential

tf.random.set_seed(2345)

conv_layers = [  # 5 units of conv + max pooling
    # unit 1
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 2
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 3
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 4
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 5
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')
]


def preprocess(x, y):
    # scale pixels to [0, 1]
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y


(x, y), (x_test, y_test) = datasets.cifar100.load_data()
# labels come as [b, 1]; squeeze to [b]
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)
print(x.shape, y.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(64)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))


def main():
    # [b, 32, 32, 3] => [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)

    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100, activation=None),
    ])

    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
    optimizer = optimizers.Adam(learning_rate=1e-4)

    # [1, 2] + [3, 4] => [1, 2, 3, 4]: one list so both sub-networks train jointly
    variables = conv_net.trainable_variables + fc_net.trainable_variables

    for epoch in range(50):

        for step, (x, y) in enumerate(train_db):

            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # flatten, => [b, 512]
                out = tf.reshape(out, [-1, 512])
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(grads, variables))

            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))

        # evaluate on the test set after each epoch
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)


if __name__ == '__main__':
    main()
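One detail worth calling out: the last Dense layer uses activation=None, so fc_net outputs raw logits, and the loss is computed with from_logits=True so TensorFlow can fuse the softmax into the cross-entropy for numerical stability. A minimal sketch of the equivalence (the values here are illustrative, not from the training run):

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])   # raw network output
y_onehot = tf.one_hot([0], depth=3)

# stable fused path, as used in the training loop above
loss_fused = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)

# explicit softmax first, then cross-entropy on probabilities
loss_explicit = tf.losses.categorical_crossentropy(y_onehot, tf.nn.softmax(logits))

print(float(loss_fused[0]), float(loss_explicit[0]))  # ~0.417 from both

In the evaluation loop the softmax is applied explicitly only to turn logits into probabilities before argmax; since softmax is monotonic, taking argmax of the logits directly would yield the same predictions.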
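The code keeps the convolutional stack and the classifier as two Sequential models and concatenates their trainable_variables so one GradientTape update covers both. An equivalent single-model formulation (a sketch under the assumption of a fresh conv_layers list, with layers.Flatten() replacing the manual tf.reshape to [b, 512]) would be:

from tensorflow.keras import layers, Sequential

vgg13 = Sequential(conv_layers + [
    layers.Flatten(),                          # [b, 1, 1, 512] => [b, 512]
    layers.Dense(256, activation=tf.nn.relu),
    layers.Dense(128, activation=tf.nn.relu),
    layers.Dense(100, activation=None),
])
vgg13.build(input_shape=[None, 32, 32, 3])
# vgg13.trainable_variables now covers the whole network in one list.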