A strange bug in tf.scatter_add when implementing unpool in TensorFlow
I'm trying to implement unpooling in TensorFlow with tf.scatter_add, but I've run into a strange bug. Here is my code:
import tensorflow as tf
import numpy as np
import random
tf.reset_default_graph()
mat = list(range(64))
random.shuffle(mat)
mat = np.array(mat)
mat = np.reshape(mat, [1,8,8,1])
M = tf.constant(mat, dtype=tf.float32)
pool1, argmax1 = tf.nn.max_pool_with_argmax(M, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
pool2, argmax2 = tf.nn.max_pool_with_argmax(pool1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
pool3, argmax3 = tf.nn.max_pool_with_argmax(pool2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
def unpool(x, argmax, strides, unpool_shape=None, batch_size=None, name='unpool'):
    x_shape = x.get_shape().as_list()
    argmax_shape = argmax.get_shape().as_list()
    assert not(x_shape[0] is None and batch_size is None), "must input batch_size if number of batch is alterable"
    if x_shape[0] is None:
        x_shape[0] = batch_size
    if argmax_shape[0] is None:
        argmax_shape[0] = x_shape[0]
    if unpool_shape is None:
        unpool_shape = [x_shape[i] * strides[i] for i in range(4)]
    x_unpool = tf.get_variable(name=name, shape=[np.prod(unpool_shape)], initializer=tf.zeros_initializer(), trainable=False)
    argmax = tf.cast(argmax, tf.int32)
    argmax = tf.reshape(argmax, [np.prod(argmax_shape)])
    x = tf.reshape(x, [np.prod(argmax.get_shape().as_list())])
    x_unpool = tf.scatter_add(x_unpool, argmax, x)
    x_unpool = tf.reshape(x_unpool, unpool_shape)
    return x_unpool
unpool2 = unpool(pool3, argmax3, strides=[1,2,2,1], name='unpool3')
unpool1 = unpool(unpool2, argmax2, strides=[1,2,2,1], name='unpool2')
unpool0 = unpool(unpool1, argmax1, strides=[1,2,2,1], name='unpool1')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    mat_out = mat[:,:,:,0]
    pool1_out = sess.run(pool1)[0,:,:,0]
    pool2_out = sess.run(pool2)[0,:,:,0]
    pool3_out = sess.run(pool3)[0,:,:,0]
    argmax1_out = sess.run(argmax1)[0,:,:,0]
    argmax2_out = sess.run(argmax2)[0,:,:,0]
    argmax3_out = sess.run(argmax3)[0,:,:,0]
    unpool2_out = sess.run(unpool2)[0,:,:,0]
    unpool1_out = sess.run(unpool1)[0,:,:,0]
    unpool0_out = sess.run(unpool0)[0,:,:,0]
    print(unpool2_out)
    print(unpool1_out)
    print(unpool0_out)
output:
[[ 0. 0.]
[ 0. 63.]]
[[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 126. 0.]
[ 0. 0. 0. 0.]]
[[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 315. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]]
The locations are right, but the values are wrong: unpool2 is correct, unpool1 is double the expected value, and unpool0 is five times the expected value. I don't know what's wrong; can anyone tell me how to fix this bug?
Many thanks in advance.
python tensorflow
asked Nov 8 '18 at 9:27 by 张庆昊, edited Dec 13 '18 at 9:22
2 Answers
In fact, the answer is simple. For convenience, I have renamed some variables; look at this code:
def unpool(x, argmax, strides, unpool_shape=None, batch_size=None, name='unpool'):
    x_shape = x.get_shape().as_list()
    argmax_shape = argmax.get_shape().as_list()
    assert not(x_shape[0] is None and batch_size is None), "must input batch_size if number of batch is alterable"
    if x_shape[0] is None:
        x_shape[0] = batch_size
    if argmax_shape[0] is None:
        argmax_shape[0] = x_shape[0]
    if unpool_shape is None:
        unpool_shape = [x_shape[i] * strides[i] for i in range(4)]
    x_unpool = tf.get_variable(name=name, shape=[np.prod(unpool_shape)], initializer=tf.zeros_initializer(), trainable=False)
    argmax = tf.cast(argmax, tf.int32)
    argmax = tf.reshape(argmax, [np.prod(argmax_shape)])
    x = tf.reshape(x, [np.prod(argmax.get_shape().as_list())])
    # scatter_add accumulates into the variable on every evaluation; this is the bug
    x_unpool_add = tf.scatter_add(x_unpool, argmax, x)
    x_unpool_reshape = tf.reshape(x_unpool_add, unpool_shape)
    return x_unpool_reshape
x_unpool_add is the op created by tf.scatter_add; every time we evaluate x_unpool_reshape, x_unpool_add runs again, so the variable x_unpool has x added into it once per evaluation. That means x_unpool will have received x twice if we evaluate unpool2 twice. In my original code I evaluate unpool2, unpool1 and unpool0 in order: evaluating unpool2 runs its scatter_add once; evaluating unpool1 must recompute unpool2, so that scatter_add runs a second time; evaluating unpool0 repeats the whole chain yet again, which is why the printed values are multiples of the expected ones. Evaluating unpool2 directly (and only once) gives the right result. Replacing tf.scatter_add with tf.scatter_update avoids this bug.
This code reproduces the effect in a more direct way:
import tensorflow as tf
t1 = tf.get_variable(name='t1', shape=[1], dtype=tf.float32, initializer=tf.zeros_initializer())
t2 = tf.get_variable(name='t2', shape=[1], dtype=tf.float32, initializer=tf.zeros_initializer())
d = tf.scatter_add(t1, [0], [1])
e = tf.scatter_add(t2, [0], d)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    d_out1 = sess.run(d)
    d_out2 = sess.run(d)
    e_out = sess.run(e)
    print(d_out1)
    print(d_out2)
    print(e_out)
output:
[1.]
[2.]
[3.]
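(For intuition: e_out is [3.] because fetching e triggers a third run of d, so t1 has already accumulated to 3 by the time it is added into t2.)
As a side note that goes beyond the original answer: since the whole problem comes from keeping state in a variable, the unpool can also be written statelessly with tf.scatter_nd, which builds a fresh tensor from (indices, updates, shape) on every run and therefore cannot accumulate anything across sess.run calls. This is only a sketch under the same assumptions as the code above (batch size 1, argmax flattened over the whole input); unpool_stateless is my own name:
import tensorflow as tf
import numpy as np

# Hypothetical stateless variant: no variable is involved, so re-evaluating
# the op always starts from zeros instead of accumulating.
def unpool_stateless(x, argmax, strides, unpool_shape=None):
    x_shape = x.get_shape().as_list()
    if unpool_shape is None:
        unpool_shape = [x_shape[i] * strides[i] for i in range(4)]
    flat_size = int(np.prod(unpool_shape))
    # tf.scatter_nd expects rank-2 indices: one row per scattered element
    indices = tf.reshape(tf.cast(argmax, tf.int32), [-1, 1])
    updates = tf.reshape(x, [-1])
    out = tf.scatter_nd(indices, updates, [flat_size])
    return tf.reshape(out, unpool_shape)
With this variant, unpool_stateless(pool3, argmax3, strides=[1,2,2,1]) gives the same result no matter how many times or in what order the outputs are fetched.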
answered Nov 12 '18 at 5:41 by 张庆昊, edited Dec 18 '18 at 8:00
Using tf.scatter_update avoids this:
import tensorflow as tf
import numpy as np
import random
tf.reset_default_graph()
mat = list(range(64))
random.shuffle(mat)
mat = np.array(mat)
mat = np.reshape(mat, [1,8,8,1])
M = tf.constant(mat, dtype=tf.float32)
pool1, argmax1 = tf.nn.max_pool_with_argmax(M, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
pool2, argmax2 = tf.nn.max_pool_with_argmax(pool1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
pool3, argmax3 = tf.nn.max_pool_with_argmax(pool2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
def unpool(x, argmax, strides, unpool_shape=None, batch_size=None, name='unpool'):
    x_shape = x.get_shape().as_list()
    argmax_shape = argmax.get_shape().as_list()
    assert not(x_shape[0] is None and batch_size is None), "must input batch_size if number of batch is alterable"
    if x_shape[0] is None:
        x_shape[0] = batch_size
    if argmax_shape[0] is None:
        argmax_shape[0] = x_shape[0]
    if unpool_shape is None:
        unpool_shape = [x_shape[i] * strides[i] for i in range(4)]
    unpool = tf.get_variable(name=name, shape=[np.prod(unpool_shape)], initializer=tf.zeros_initializer(), trainable=False)
    argmax = tf.cast(argmax, tf.int32)
    argmax = tf.reshape(argmax, [np.prod(argmax_shape)])
    x = tf.reshape(x, [np.prod(argmax.get_shape().as_list())])
    # scatter_update overwrites instead of accumulating, so repeated evaluation is safe
    unpool = tf.scatter_update(unpool, argmax, x)
    unpool = tf.reshape(unpool, unpool_shape)
    return unpool
unpool2 = unpool(pool3, argmax3, strides=[1,2,2,1], name='unpool3')
unpool1 = unpool(unpool2, argmax2, strides=[1,2,2,1], name='unpool2')
unpool0 = unpool(unpool1, argmax1, strides=[1,2,2,1], name='unpool1')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    mat_out = mat[:,:,:,0]
    pool1_out = sess.run(pool1)[0,:,:,0]
    pool2_out = sess.run(pool2)[0,:,:,0]
    pool3_out = sess.run(pool3)[0,:,:,0]
    argmax1_out = sess.run(argmax1)[0,:,:,0]
    argmax2_out = sess.run(argmax2)[0,:,:,0]
    argmax3_out = sess.run(argmax3)[0,:,:,0]
    unpool2_out = sess.run(unpool2)[0,:,:,0]
    unpool1_out = sess.run(unpool1)[0,:,:,0]
    unpool0_out = sess.run(unpool0)[0,:,:,0]
    print(unpool2_out)
    print(unpool1_out)
    print(unpool0_out)
output:
[[ 0. 0.]
[ 0. 63.]]
[[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 63.]
[ 0. 0. 0. 0.]]
[[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 63.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0.]]
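One caveat worth adding (not part of the original answer): tf.scatter_update only overwrites the positions listed in argmax, so values written by an earlier sess.run can survive at positions the current run does not touch; it happens to be harmless here because the same positions are written on every run. A sketch of a guard, with hypothetical names, is to zero the variable before scattering:
import tensorflow as tf

# Hypothetical guard: reset the buffer before each scatter so no stale values
# from a previous sess.run remain in untouched positions.
buf = tf.get_variable('unpool_buf', shape=[16], initializer=tf.zeros_initializer(), trainable=False)
reset = tf.assign(buf, tf.zeros_like(buf))
with tf.control_dependencies([reset]):
    # this op now always runs against an all-zero buffer
    scattered = tf.scatter_update(buf, [3, 7], [1.0, 2.0])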
answered Nov 8 '18 at 9:31 by 张庆昊, edited Dec 18 '18 at 7:59