Академические документы
Профессиональные документы
Культурные документы
Project Report
Main.py
import numpy as np
import tensorflow as tf
import pprint
import os

# Command-line configuration (TF1-style tf.app.flags).
# Convention: the bracketed value at the end of each help string is the default.
flags = tf.app.flags
flags.DEFINE_string("arch", "FSRCNN", "Model name [FSRCNN]")
flags.DEFINE_boolean("fast", False, "Use the fast model (FSRCNN-s) [False]")
flags.DEFINE_integer("epoch", 10, "Number of epochs [10]")
flags.DEFINE_integer("batch_size", 32, "The size of batch images [32]")
flags.DEFINE_float("learning_rate", 1e-4, "The learning rate of the adam optimizer [1e-4]")
flags.DEFINE_integer("scale", 2, "The size of scale factor for preprocessing input image [2]")
flags.DEFINE_integer("radius", 1, "Max radius of the deconvolution input tensor [1]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Name of checkpoint directory [checkpoint]")
flags.DEFINE_string("output_dir", "result", "Name of test output directory [result]")
flags.DEFINE_string("data_dir", "Train", "Name of data directory to train on [Train]")
flags.DEFINE_boolean("train", True, "True for training, false for testing [True]")
flags.DEFINE_integer("threads", 1, "Number of processes to pre-process data with [1]")
flags.DEFINE_boolean("distort", False, "Distort some images with JPEG compression artifacts after downscaling [False]")
flags.DEFINE_boolean("params", False, "Save weight and bias parameters [False]")
FLAGS = flags.FLAGS
pp = pprint.PrettyPrinter()
def main(_):
    """Entry point invoked by ``tf.app.run()``: dump the parsed flags and
    prepare the checkpoint/output directories.

    NOTE(review): this listing appears truncated by extraction — the
    original presumably goes on to construct the model and start
    training/testing after the directories exist; recover the full file
    before relying on it.
    """
    pp.pprint(flags.FLAGS.__flags)
    # The fast (FSRCNN-s) variant keeps checkpoints under a 'fast_'-prefixed
    # directory so the two model variants never clobber each other.
    if FLAGS.fast:
        FLAGS.checkpoint_dir = 'fast_{}'.format(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)


if __name__ == '__main__':
    tf.app.run()
FSRCNN.py
import tensorflow as tf
from utils import tf_ssim
class Model(object):
    """FSRCNN network definition.

    Expects the enclosing training wrapper to provide ``self.images``,
    ``self.model_params`` (d, s, m, r), ``self.padding``, ``self.radius``,
    ``self.scale`` and a ``self.prelu(tensor, i)`` helper — all defined
    elsewhere in the project, not visible in this listing.

    NOTE(review): the nesting below was reconstructed from a flattened
    (indentation-stripped) listing; verify it against the original source.
    """

    def model(self):
        """Build the FSRCNN graph and return the upscaled output tensor.

        Pipeline: feature extraction -> optional shrinking -> m mapping
        layers repeated r times with a residual add -> optional expanding
        -> sub-pixel convolution (depth_to_space upscale).
        """
        d, s, m, r = self.model_params

        # Feature extraction: (padding+1)^2 kernel over the 1-channel input.
        size = self.padding + 1
        weights = tf.get_variable('w1', shape=[size, size, 1, d],
                                  initializer=tf.variance_scaling_initializer())
        biases = tf.get_variable('b1', initializer=tf.zeros([d]))
        features = tf.nn.conv2d(self.images, weights, strides=[1, 1, 1, 1],
                                padding='VALID', data_format='NHWC')
        features = tf.nn.bias_add(features, biases, data_format='NHWC')

        # Shrinking: 1x1 conv from d down to s channels. Skipped when s == 0
        # (the FSRCNN-s variant), in which case mapping runs on d channels.
        if self.model_params[1] > 0:
            features = self.prelu(features, 1)
            weights = tf.get_variable('w2', shape=[1, 1, d, s],
                                      initializer=tf.variance_scaling_initializer())
            biases = tf.get_variable('b2', initializer=tf.zeros([s]))
            features = tf.nn.conv2d(features, weights, strides=[1, 1, 1, 1],
                                    padding='SAME', data_format='NHWC')
            features = tf.nn.bias_add(features, biases, data_format='NHWC')
        else:
            s = d
        conv = features

        # Mapping: m 3x3 conv layers; the whole block repeats r times with
        # shared variables (scope.reuse_variables) and a residual connection.
        with tf.variable_scope("mapping_block") as scope:
            for ri in range(r):
                for i in range(3, m + 3):
                    weights = tf.get_variable('w{}'.format(i), shape=[3, 3, s, s],
                                              initializer=tf.variance_scaling_initializer())
                    biases = tf.get_variable('b{}'.format(i), initializer=tf.zeros([s]))
                    if i > 3:
                        conv = self.prelu(conv, i)
                    conv = tf.nn.conv2d(conv, weights, strides=[1, 1, 1, 1],
                                        padding='SAME', data_format='NHWC')
                    conv = tf.nn.bias_add(conv, biases, data_format='NHWC')
                    if i == m + 2:
                        # Close the block: PReLU, a 1x1 conv, then add the
                        # pre-mapping features back in (residual).
                        conv = self.prelu(conv, m + 3)
                        weights = tf.get_variable('w{}'.format(m + 3), shape=[1, 1, s, s],
                                                  initializer=tf.variance_scaling_initializer())
                        biases = tf.get_variable('b{}'.format(m + 3), initializer=tf.zeros([s]))
                        conv = tf.nn.conv2d(conv, weights, strides=[1, 1, 1, 1],
                                            padding='SAME', data_format='NHWC')
                        conv = tf.nn.bias_add(conv, biases, data_format='NHWC')
                        conv = tf.add(conv, features)
                scope.reuse_variables()
        conv = self.prelu(conv, 2)

        # Expanding: 1x1 conv back from s to d channels (only when shrinking ran).
        if self.model_params[1] > 0:
            expand_weights = tf.get_variable('w{}'.format(m + 4), shape=[1, 1, s, d],
                                             initializer=tf.variance_scaling_initializer())
            expand_biases = tf.get_variable('b{}'.format(m + 4), initializer=tf.zeros([d]))
            conv = tf.nn.conv2d(conv, expand_weights, strides=[1, 1, 1, 1],
                                padding='SAME', data_format='NHWC')
            conv = tf.nn.bias_add(conv, expand_biases, data_format='NHWC')
            conv = self.prelu(conv, m + 4)

        # Sub-pixel convolution: conv to scale^2 channels, then pixel shuffle.
        size = self.radius * 2 + 1
        deconv_weights = tf.get_variable('deconv_w', shape=[size, size, d, self.scale**2],
                                         initializer=tf.variance_scaling_initializer(scale=0.01))
        deconv_biases = tf.get_variable('deconv_b', initializer=tf.zeros([self.scale**2]))
        deconv = tf.nn.conv2d(conv, deconv_weights, strides=[1, 1, 1, 1],
                              padding='SAME', data_format='NHWC')
        deconv = tf.nn.bias_add(deconv, deconv_biases, data_format='NHWC')
        deconv = tf.depth_to_space(deconv, self.scale, name='pixel_shuffle',
                                   data_format='NHWC')
        return deconv
CSFM.py
import tensorflow as tf
# NOTE(review): this CSFM listing is heavily truncated by extraction — the
# statements below sit directly in the class body (no `def` line survived),
# `d` and `conv` are referenced before any assignment, and the feature-fusion
# blocks between the first conv and the upscaling head are missing entirely.
# Kept verbatim; recover the original file before editing.
class Model(object):
size = self.padding + 1
features = tf.contrib.layers.conv2d(self.images, d, size, 1, 'VALID', 'NHWC',
activation_fn=None, scope='features')
shortcuts = conv
# Upscaling head: leaky-ReLU, conv to d*scale^2 channels, pixel shuffle
# (depth_to_space), then a final 3x3 conv down to one output channel.
with tf.variable_scope("upscaling"):
conv = tf.nn.leaky_relu(conv)
conv = tf.contrib.layers.conv2d(conv, d * self.scale**2, 3, 1, 'SAME', 'NHWC',
activation_fn=None, scope='sub-pixel_conv')
conv = tf.depth_to_space(conv, self.scale, name='pixel_shuffle', data_format='NHWC')
conv = tf.contrib.layers.conv2d(conv, 1, 3, 1, 'SAME', 'NHWC', activation_fn=None,
scope='final')
return conv
Utils.py
"""
Scipy version > 0.18 is needed, due to 'mode' option from scipy.misc.imread function
"""
import os
import glob
from math import ceil
import subprocess
import io
from random import randrange, shuffle
import tensorflow as tf
from PIL import Image
import numpy as np
from multiprocessing import Pool, Lock, active_children
FLAGS = tf.app.flags.FLAGS
# Module-level switch; the setup helpers below exit early when it is False.
downsample = True
# NOTE(review): the `def` line and the opening of this helper's docstring
# were lost in extraction — only the docstring tail and the body remain.
# Kept verbatim.
For train dataset, output data would be ['.../t1.bmp', '.../t2.bmp', ..., '.../t99.bmp']
"""
if FLAGS.train:
data_dir = os.path.join(os.getcwd(), dataset)
data = []
# Collect both .bmp and .png training images, then shuffle their order.
for files in ('*.bmp', '*.png'):
data.extend(glob.glob(os.path.join(data_dir, files)))
shuffle(data)
else:
# Testing evaluates on the Set5 benchmark images, in a stable sorted order.
data_dir = os.path.join(os.sep, (os.path.join(os.getcwd(), dataset)), "Set5")
data = sorted(glob.glob(os.path.join(data_dir, "*.bmp")))
return data
# Worker body run inside a multiprocessing.Pool: cuts one training image into
# aligned input/label sub-patches.
# NOTE(review): truncated by extraction — `input_` is used without the
# preprocess() call that produces it, and the stride-based cropping loops
# between the shape check and the appends are missing. Kept verbatim.
def train_input_worker(args):
image_data, config = args
image_size, label_size, stride, scale, padding, distort = config
single_input_sequence, single_label_sequence = [], []
# Handle both colour (H, W, C) and greyscale (H, W) arrays — TODO confirm
# against the missing preprocess() helper.
if len(input_.shape) == 3:
h, w, _ = input_.shape
else:
h, w = input_.shape
single_input_sequence.append(sub_input)
single_label_sequence.append(sub_label)
def thread_train_setup(config):
"""
Spawns |config.threads| worker processes to pre-process the data
This has not been extensively tested so use at your own risk.
Also this is technically multiprocessing not threading, I just say thread
because it's shorter to type.
"""
# Hard bail-out when downsampling is disabled (module-level switch above).
if downsample == False:
import sys
sys.exit()
sess = config.sess
# NOTE(review): truncated by extraction — the code that builds `pool` and
# dispatches `workers` over the dataset is missing, as is whatever merges
# the per-worker results into `sub_input_sequence`/`sub_label_sequence`
# and the final return. Kept verbatim.
pool.close()
results = []
# Block on each worker in turn; 240 s timeout per worker.
for i in range(len(workers)):
print("Waiting for worker process {}".format(i))
results.extend(workers[i].get(timeout=240))
print("Worker process {} done".format(i))
arrdata = np.asarray(sub_input_sequence)
arrlabel = np.asarray(sub_label_sequence)
def train_input_setup(config):
"""
Read image files, make their sub-images, and save them as a h5 file format.
"""
# Hard bail-out when downsampling is disabled (module-level switch above).
if downsample == False:
import sys
sys.exit()
sess = config.sess
# NOTE(review): the right-hand side of this unpacking was wrapped onto the
# next line by extraction, splitting the statement. Kept verbatim.
image_size, label_size, stride, scale, padding = config.image_size, config.label_size,
config.stride, config.scale, config.padding // 2
# NOTE(review): `data`, the patch-cropping loops, and the final return are
# missing from this listing. Kept verbatim.
for i in range(len(data)):
input_, label_ = preprocess(data[i], scale, distort=config.distort)
# Handle both colour (H, W, C) and greyscale (H, W) arrays.
if len(input_.shape) == 3:
h, w, _ = input_.shape
else:
h, w = input_.shape
sub_input_sequence.append(sub_input)
sub_label_sequence.append(sub_label)
arrdata = np.asarray(sub_input_sequence)
arrlabel = np.asarray(sub_label_sequence)
# Builds the test-time input/label arrays.
# NOTE(review): truncated by extraction — the preprocess() call that produces
# `input_`/`label_`, the padding/cropping code, and the assembly of the
# returned arrays are missing. Kept verbatim.
def test_input_setup(config):
sess = config.sess
# Handle both colour (H, W, C) and greyscale (H, W) arrays.
if len(input_.shape) == 3:
h, w, _ = input_.shape
else:
h, w = input_.shape
if len(label_.shape) == 3:
h, w, _ = label_.shape
else:
h, w = label_.shape
return img
# NOTE(review): three function tails follow, their `def` lines lost in
# extraction. The first appears to be the body of a save_params-style
# helper that dumps each weight tensor to a text file handle `h`.
if not os.path.exists(param_dir):
os.makedirs(param_dir)
# 1-D/2-D tensors (biases, PReLU alphas) are dumped as a flat Python list;
# 4-D conv kernels are written element by element below.
if len(weights.shape) < 4:
h.write("{}\n\n".format(weights.flatten().tolist()))
else:
h.write("[")
sep = False
# Iterate kernel spatial positions, then input/output channels, emitting a
# comma-separated stream; one line break per spatial position.
for filter_x in range(len(weights)):
for filter_y in range(len(weights[filter_x])):
filter_weights = weights[filter_x][filter_y]
for input_channel in range(len(filter_weights)):
for output_channel in range(len(filter_weights[input_channel])):
val = filter_weights[input_channel][output_channel]
if sep:
h.write(', ')
h.write("{}".format(val))
sep = True
h.write("\n ")
h.write("]\n\n")
h.close()
# NOTE(review): tail of a Gaussian-window builder (presumably the
# _tf_fspecial_gauss helper used by tf_ssim) — normalises the kernel to sum 1.
x = tf.constant(x_data, dtype=tf.float32)
y = tf.constant(y_data, dtype=tf.float32)
g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
return g / tf.reduce_sum(g)
# NOTE(review): tail of tf_ssim — the standard SSIM formula; `cs_map`
# returns only the contrast-structure term. The second statement below was
# line-wrapped by extraction. Kept verbatim.
if cs_map:
value = (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
else:
value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2))
if mean_metric:
value = tf.reduce_mean(value)
return value
return value
model.py
import time
import os
import importlib
from random import randrange
import numpy as np
import tensorflow as tf
from PIL import Image
import pdb
# Based on http://mmlab.ie.cuhk.edu.hk/projects/FSRCNN.html
# NOTE(review): the `def __init__(self, config)` line and the first config
# assignments of this wrapper class were lost in extraction — the statements
# below are the tail of the constructor followed by init_model(). Kept verbatim.
class Model(object):
self.padding = 4
# Different image/label sub-sizes for different scaling factors x2, x3, x4
scale_factors = [[20 + self.padding, 40], [14 + self.padding, 42], [12 + self.padding, 48]]
self.image_size, self.label_size = scale_factors[self.scale - 2]
self.checkpoint_dir = config.checkpoint_dir
self.output_dir = config.output_dir
self.data_dir = config.data_dir
self.init_model()
def init_model(self):
# Training uses fixed-size patch placeholders; testing accepts any H/W.
if self.train:
self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, 1],
name='images')
self.labels = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, 1],
name='labels')
else:
self.images = tf.placeholder(tf.float32, [None, None, None, 1], name='images')
self.labels = tf.placeholder(tf.float32, [None, None, None, 1], name='labels')
# Batch size differs in training vs testing
self.batch = tf.placeholder(tf.int32, shape=[], name='batch')
# The architecture module (FSRCNN/ESPCN/...) is selected by name at runtime.
model = importlib.import_module(self.arch)
self.model = model.Model(self)
self.pred = self.model.model()
# Checkpoint directory name encodes arch, label size, params, and radius.
model_dir = "%s_%s_%s_%s" % (self.model.name.lower(), self.label_size, '-'.join(str(i) for
i in self.model.model_params), "r"+str(self.radius))
self.model_dir = os.path.join(self.checkpoint_dir, model_dir)
self.saver = tf.train.Saver()
def run(self):
    """Build the optimizer and train op, restore any checkpoint, then
    dispatch to parameter export, training, or testing based on the flags.

    Relies on ``self.loss``, ``self.sess``, ``self.learning_rate`` etc.
    being set up elsewhere in the class (not visible in this listing).
    """
    global_step = tf.Variable(0, trainable=False)
    optimizer = tf.train.AdamOptimizer(self.learning_rate)
    # Hook for rescaling the deconvolution layer's gradients; the current
    # multiplier of 1.0 leaves them unchanged.
    deconv_mult = lambda grads: list(map(
        lambda x: (x[0] * 1.0, x[1]) if 'deconv' in x[1].name else x, grads))
    grads = deconv_mult(optimizer.compute_gradients(self.loss))
    self.train_op = optimizer.apply_gradients(grads, global_step=global_step)
    tf.global_variables_initializer().run()

    if self.load():
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")

    # Three mutually exclusive modes: dump parameters, train, or test.
    if self.params:
        save_params(self.sess, self.model.model_params)
    elif self.train:
        self.run_train()
    else:
        self.run_test()
# Training loop driver.
# NOTE(review): truncated by extraction — the sess.run call that executes the
# train op and produces `err`, and the code accumulating
# `start_average`/`end_average`/`batch_average`, are missing. Kept verbatim.
def run_train(self):
start_time = time.time()
print("Beginning training setup...")
# Single-process or multi-process data preparation, per the threads flag.
if self.threads == 1:
train_data, train_label = train_input_setup(self)
else:
train_data, train_label = thread_train_setup(self)
print("Training setup took {} seconds with {} threads".format(time.time() - start_time,
self.threads))
print("Training...")
start_time = time.time()
start_average, end_average, counter = 0, 0, 0
for ep in range(self.epoch):
# Run by batch images
batch_idxs = len(train_data) // self.batch_size
batch_average = 0
for idx in range(0, batch_idxs):
batch_images = train_data[idx * self.batch_size : (idx + 1) * self.batch_size]
batch_labels = train_label[idx * self.batch_size : (idx + 1) * self.batch_size]
# Progress log every 10 steps.
if counter % 10 == 0:
print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
% ((ep+1), counter, time.time() - start_time, err))
# Compare loss of the first 20% and the last 20% epochs
start_average = float(start_average) / (self.epoch * 0.2)
end_average = float(end_average) / (self.epoch * 0.2)
print("Start Average: [%.6f], End Average: [%.6f], Improved: [%.2f%%]" \
% (start_average, end_average, 100 - (100*end_average/start_average)))
# Test-time evaluation: run the network on the test set, report PSNR/SSIM,
# and save the output image.
# NOTE(review): truncated by extraction — `image_path` is used without being
# defined here. Kept verbatim.
def run_test(self):
test_data, test_label = test_input_setup(self)
print("Testing...")
start_time = time.time()
# Clamp predictions into the valid [0, 1] intensity range.
result = np.clip(self.pred.eval({self.images: test_data, self.labels: test_label, self.batch: 1}),
0, 1)
passed = time.time() - start_time
img1 = tf.convert_to_tensor(test_label, dtype=tf.float32)
img2 = tf.convert_to_tensor(result, dtype=tf.float32)
# max_val=1 because images are normalised to [0, 1].
psnr = self.sess.run(tf.image.psnr(img1, img2, 1))
ssim = self.sess.run(tf.image.ssim(img1, img2, 1))
print("Took %.3f seconds, PSNR: %.6f, SSIM: %.6f" % (passed, psnr, ssim))
array_image_save(result, image_path)
# NOTE(review): the lines below are the tail of a checkpoint-save method
# whose `def save(...)` line was lost in extraction. Kept verbatim.
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
self.saver.save(self.sess,
os.path.join(self.model_dir, model_name),
global_step=step)
def load(self):
    """Restore the latest checkpoint from ``self.model_dir``.

    Returns:
        True when a checkpoint was found and restored, False otherwise.
    """
    print(" [*] Reading checkpoints...")
    ckpt = tf.train.get_checkpoint_state(self.model_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        self.saver.restore(self.sess, os.path.join(self.model_dir, ckpt_name))
        return True
    else:
        return False
gen.py
import sys
import math
from itertools import islice
# Kernel radius of the sub-pixel (deconvolution) layer; must match the
# `radius` flag the weights file was trained with.
radius = 1
def base_header(file):
    """Write the mpv user-shader directives common to every pass.

    Uses the module-global ``scale`` (set in main()); the //!WHEN condition
    enables the shader only when the output is larger than (scale-1).4x the
    luma plane in both dimensions.
    """
    file.write('//!HOOK LUMA\n')
    file.write('//!WHEN OUTPUT.w LUMA.w / {0}.400 > OUTPUT.h LUMA.h / {0}.400 > *\n'.format(scale - 1))
def header6(file):
    """Write the header of the final aggregation pass.

    Scales the output plane by the module-global ``scale`` and binds every
    SUBCONV texture produced by the sub-pixel convolution passes
    (scale**2 output values packed ``comps`` per texture).
    """
    base_header(file)
    file.write('//!WIDTH LUMA.w {} *\n'.format(scale))
    file.write('//!HEIGHT LUMA.h {} *\n'.format(scale))
    file.write('//!DESC aggregation\n')
    for i in range(scale**2 // comps):
        file.write('//!BIND SUBCONV{}\n'.format(i + 1))
# Converts a dumped weights text file ("weights_d_s_m_r.txt") into an mpv
# GLSL user shader (FSRCNNX_x{scale}_*.glsl), one rendering pass per 4-channel
# slice of each layer.
# NOTE(review): this listing is indentation-stripped and several string
# literals are wrapped mid-literal by the extraction (e.g. the file.write
# calls split across lines below); it also relies on helpers not shown here
# (read_weights, get_line_number, format_weights, header1..header5,
# header3_1). Kept verbatim — recover the original file before running.
def main():
if len(sys.argv) == 2:
fname=sys.argv[1]
# Network hyper-parameters d, s, m, r are encoded in the file name.
d, s, m, r = [int(i) for i in fname[7:fname.index('.')].split("_")]
if s == 0:
s=d
shrinking = False
else:
shrinking = True
global scale, comps
# The upscale factor is recovered from the deconv bias count (scale**2).
deconv_biases = read_weights(fname, get_line_number("deconv_b", fname))
scale = int(math.sqrt(len(deconv_biases[0].split(","))))
dst = fname.replace("_", "-").replace("weights",
"FSRCNNX_x{}_".format(scale)).replace("txt", "glsl")
with open(dst, 'w') as file:
# Feature layer
feature_radius = 2
ln = get_line_number("w1", fname)
weights = read_weights(fname, ln, (feature_radius*2+1)**2)
ln = get_line_number("b1", fname)
biases = read_weights(fname, ln)
for n in range(0, d, 4):
header1(file, n, d)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
p=0
for l in range(0, len(weights)):
y, x = p%(feature_radius*2+1)-feature_radius, p//(feature_radius*2+1)-
feature_radius
p += 1
file.write('res += vec4({}) *
float(LUMA_texOff(vec2({},{})));\n'.format(format_weights(weights[l], n), x, y))
# PReLU is only emitted here when a shrinking layer follows.
if shrinking:
ln = get_line_number("alpha1", fname)
alphas = read_weights(fname, ln)
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res,
vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
if shrinking:
# Shrinking layer
ln = get_line_number("w2", fname)
weights = read_weights(fname, ln, d)
ln = get_line_number("b2", fname)
biases = read_weights(fname, ln)
for n in range(0, s, 4):
header2(file, d, n, s)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
for l in range(0, d, 4):
file.write('res += mat4({},{},{},{}) *
FEATURE{}_texOff(vec2(0.0));\n'.format(format_weights(weights[l], n),
format_weights(weights[l+1], n), format_weights(weights[l+2], n),
format_weights(weights[l+3], n), l//4+1))
file.write('return res;\n')
file.write('}\n\n')
# Mapping layers
inp = "SHRINKED" if shrinking else "FEATURE"
for ri in range(r):
for mi in range(m):
tex_name = inp if ri == 0 and mi == 0 else "RES" if ri > 0 and mi == 0 else "MODEL"
ln = get_line_number("w{}".format(mi + 3), fname)
weights = read_weights(fname, ln, s*9)
ln = get_line_number("b{}".format(mi + 3), fname)
biases = read_weights(fname, ln)
for n in range(0, s, 4):
header3(file, ri, mi, m, n, s, tex_name)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
p=0
for l in range(0, len(weights), 4):
if l % s == 0:
y, x = p%3-1, p//3-1
p += 1
idx = (l//4)%(s//4)
file.write('res += mat4({},{},{},{}) * {}{}_texOff(vec2({},{}));\n'.format(
format_weights(weights[l], n), format_weights(weights[l+1], n),
format_weights(weights[l+2], n), format_weights(weights[l+3], n),
tex_name, idx + 1 + (20 if (ri * m + mi) % 2 == 1 else 0), x, y))
ln = get_line_number("alpha{}".format(m + 3 if mi == m - 1 else mi + 4), fname)
alphas = read_weights(fname, ln)
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res,
vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
# After the last mapping layer: 1x1 conv pass with a residual add.
if mi == m - 1:
ln = get_line_number("w{}".format(m + 3), fname)
weights = read_weights(fname, ln, s*(mi+2))
ln = get_line_number("b{}".format(m + 3), fname)
biases = read_weights(fname, ln)
for n in range(0, s, 4):
header3_1(file, ri, mi, m, n, s, inp)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
for l in range(0, s, 4):
file.write('res += mat4({},{},{},{}) * MODEL{}_texOff(0);\n'.format(
format_weights(weights[l], n), format_weights(weights[l+1], n),
format_weights(weights[l+2], n), format_weights(weights[l+3], n),
l//4 + 1 + (20 if (ri * m + mi) % 2 == 0 else 0)))
file.write('res += {}{}_texOff(0);\n'.format(inp, (n//4)%(s//4) + 1))
if ri == r - 1:
ln = get_line_number("alpha2", fname)
alphas = read_weights(fname, ln)
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res,
vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
if shrinking:
# Expanding layer
ln = get_line_number("w{}".format(m + 4), fname)
weights = read_weights(fname, ln, d)
ln = get_line_number("b{}".format(m + 4), fname)
biases = read_weights(fname, ln)
ln = get_line_number("alpha{}".format(m + 4), fname)
alphas = read_weights(fname, ln)
for n in range(0, d, 4):
header4(file, s, m, r, n, d)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
for l in range(0, s, 4):
file.write('res += mat4({},{},{},{}) *
RES{}_texOff(vec2(0.0));\n'.format(format_weights(weights[l], n),
format_weights(weights[l+1], n), format_weights(weights[l+2], n),
format_weights(weights[l+3], n),
l//4 + 1))
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res,
vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
# Sub-pixel convolution
ln = get_line_number("deconv_w", fname)
weights = read_weights(fname, ln, d*(radius*2+1)**2)
ln = get_line_number("deconv_b", fname)
biases = read_weights(fname, ln)
inp = "EXPANDED" if shrinking else "RES"
# x3 upscaling packs 9 outputs as 3 per texture; otherwise 4 per texture.
comps = 3 if scale == 3 else 4
for n in range(0, scale**2, comps):
header5(file, n, d, inp)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec{0} res = vec{0}({1});\n'.format(comps, format_weights(biases[0], n,
length=comps)))
p=0
for l in range(0, len(weights), 4):
if l % d == 0:
y, x = p%(radius*2+1)-radius, p//(radius*2+1)-radius
p += 1
idx = (l//4)%(d//4)
file.write('res += mat4x{}({},{},{},{}) * {}{}_texOff(vec2({},{}));\n'.format(
comps, format_weights(weights[l], n, length=comps),
format_weights(weights[l+1], n, length=comps),
format_weights(weights[l+2], n, length=comps),
format_weights(weights[l+3], n, length=comps),
inp, idx + 1, x, y))
if comps == 4:
file.write('return res;\n')
else:
file.write('return vec4(res, 0);\n')
file.write('}\n\n')
# Aggregation
header6(file)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec2 fcoord = fract(SUBCONV1_pos * SUBCONV1_size);\n')
file.write('vec2 base = SUBCONV1_pos + (vec2(0.5) - fcoord) * SUBCONV1_pt;\n')
file.write('ivec2 index = ivec2(fcoord * vec2({}));\n'.format(scale))
if scale > 2:
file.write('mat{0} res = mat{0}(SUBCONV1_tex(base).{1}'.format(scale,
"rgba"[:comps]))
for i in range(scale-1):
file.write(',SUBCONV{}_tex(base).{}'.format(i + 2, "rgba"[:comps]))
file.write(');\n')
file.write('return vec4(res[index.x][index.y], 0, 0, 1);\n')
else:
file.write('vec4 res = SUBCONV1_tex(base);\n')
file.write('return vec4(res[index.x * {} + index.y], 0, 0, 1);\n'.format(scale))
file.write('}\n')
else:
print("Missing argument: You must specify a file name")
return
# Script entry point.
if __name__ == '__main__':
    main()
ESPCN.py
import tensorflow as tf
from utils import tf_ssim
class Model(object):
    """ESPCN-style network: feature-extraction convolution followed by a
    sub-pixel convolution (depth_to_space) upscale.

    Expects the enclosing wrapper to provide ``self.images``,
    ``self.model_params`` (a list of layer widths), ``self.padding``,
    ``self.radius``, ``self.scale`` and a ``self.prelu(tensor, i)`` helper.

    NOTE(review): only the first and last layers appear in this listing —
    the hidden layers implied by ``m = len(d) + 2`` and by the use of
    ``d[-1]`` as the deconv input width are missing from the extracted
    report; recover the original file before relying on it.
    """

    def model(self):
        """Build the graph and return the upscaled output tensor."""
        d = self.model_params
        m = len(d) + 2  # layer index bookkeeping; unused in the visible code

        # Feature extraction over the single-channel input.
        size = self.padding + 1
        weights = tf.get_variable('w1', shape=[size, size, 1, d[0]],
                                  initializer=tf.variance_scaling_initializer())
        biases = tf.get_variable('b1', initializer=tf.zeros([d[0]]))
        conv = tf.nn.conv2d(self.images, weights, strides=[1, 1, 1, 1],
                            padding='VALID', data_format='NHWC')
        conv = tf.nn.bias_add(conv, biases, data_format='NHWC')
        conv = self.prelu(conv, 1)

        # Sub-pixel convolution: conv to scale^2 channels, then pixel shuffle.
        size = self.radius * 2 + 1
        deconv_weights = tf.get_variable('deconv_w', shape=[size, size, d[-1], self.scale**2],
                                         initializer=tf.variance_scaling_initializer(scale=0.01))
        deconv_biases = tf.get_variable('deconv_b', initializer=tf.zeros([self.scale**2]))
        deconv = tf.nn.conv2d(conv, deconv_weights, strides=[1, 1, 1, 1],
                              padding='SAME', data_format='NHWC')
        deconv = tf.nn.bias_add(deconv, deconv_biases, data_format='NHWC')
        deconv = tf.depth_to_space(deconv, self.scale, name='pixel_shuffle',
                                   data_format='NHWC')
        return deconv
LapSRN.py
import tensorflow as tf
from utils import tf_ssim
class Model(object):
    """Network listed under LapSRN.py in the report.

    NOTE(review): this listing is byte-identical to the ESPCN.py listing
    above (feature conv + sub-pixel upscale), which is not the usual
    progressive LapSRN architecture — most likely a copy-paste/extraction
    artifact in the report. Recover the original file before relying on it.

    Expects the enclosing wrapper to provide ``self.images``,
    ``self.model_params`` (a list of layer widths), ``self.padding``,
    ``self.radius``, ``self.scale`` and a ``self.prelu(tensor, i)`` helper.
    """

    def model(self):
        """Build the graph and return the upscaled output tensor."""
        d = self.model_params
        m = len(d) + 2  # layer index bookkeeping; unused in the visible code

        # Feature extraction over the single-channel input.
        size = self.padding + 1
        weights = tf.get_variable('w1', shape=[size, size, 1, d[0]],
                                  initializer=tf.variance_scaling_initializer())
        biases = tf.get_variable('b1', initializer=tf.zeros([d[0]]))
        conv = tf.nn.conv2d(self.images, weights, strides=[1, 1, 1, 1],
                            padding='VALID', data_format='NHWC')
        conv = tf.nn.bias_add(conv, biases, data_format='NHWC')
        conv = self.prelu(conv, 1)

        # Sub-pixel convolution: conv to scale^2 channels, then pixel shuffle.
        size = self.radius * 2 + 1
        deconv_weights = tf.get_variable('deconv_w', shape=[size, size, d[-1], self.scale**2],
                                         initializer=tf.variance_scaling_initializer(scale=0.01))
        deconv_biases = tf.get_variable('deconv_b', initializer=tf.zeros([self.scale**2]))
        deconv = tf.nn.conv2d(conv, deconv_weights, strides=[1, 1, 1, 1],
                              padding='SAME', data_format='NHWC')
        deconv = tf.nn.bias_add(deconv, deconv_biases, data_format='NHWC')
        deconv = tf.depth_to_space(deconv, self.scale, name='pixel_shuffle',
                                   data_format='NHWC')
        return deconv
Result -