[API] Tensorflow Convert to Tensor - slicing and joining
(TensorFlow tensor transformations - slicing and joining)
Slicing and Joining
TensorFlow provides several operations that slice out or extract parts of a tensor, or join multiple tensors together.
tf.slice(input_, begin, size, name=None)
Example:
# 'input' is [[[1, 1, 1], [2, 2, 2]],
#             [[3, 3, 3], [4, 4, 4]],
#             [[5, 5, 5], [6, 6, 6]]]
tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
                                            [4, 4, 4]]]
tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
                                            [[5, 5, 5]]]
Args:
input_: A Tensor.
begin: An int32 or int64 Tensor.
size: An int32 or int64 Tensor.
name: A name for the operation. (optional)
Returns:
A Tensor with the same type as input_.
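As a mental model, tf.slice takes begin as per-dimension start offsets and size as per-dimension lengths, much like NumPy basic slicing. A minimal NumPy sketch of the semantics (slice_like_tf is an illustrative helper, not a TensorFlow API):
import numpy as np
def slice_like_tf(arr, begin, size):
    # One slice per dimension: start at begin[i] and take size[i] elements;
    # size[i] == -1 means "all remaining elements", as in tf.slice.
    idx = tuple(slice(b, arr.shape[i] if s == -1 else b + s)
                for i, (b, s) in enumerate(zip(begin, size)))
    return arr[idx]
t = np.arange(1, 19).reshape(3, 2, 3)
print(slice_like_tf(t, [1, 0, 0], [1, 1, 3]))  # [[[7 8 9]]], analogous to tf.slice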
Source: Tensor Transformations
TensorFlow tf.slice results
In [1]:
import tensorflow as tf
import tfutil
# [1, 2, 3, 4, 5, 6, 7, 8, 9]
const1 = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9])
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(9,), dtype=int32)
Tensor("Shape:0", shape=(1,), dtype=int32)
[1 2 3 4 5 6 7 8 9]
In [2]:
sl_const1 = tf.slice(const1, [2], [3])
print(sl_const1)
print(tf.shape(sl_const1))
tfutil.print_operation_value(sl_const1)
Tensor("Slice:0", shape=(3,), dtype=int32)
Tensor("Shape_1:0", shape=(1,), dtype=int32)
[3 4 5]
In [3]:
# [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]]
const2 = tf.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]])
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(2, 10), dtype=int32)
Tensor("Shape_2:0", shape=(2,), dtype=int32)
[[ 1 2 3 4 5 6 7 8 9 10]
[11 12 13 14 15 16 17 18 19 20]]
In [4]:
sl_const2 = tf.slice(const2, [0, 1], [1, 3])
print(sl_const2)
print(tf.shape(sl_const2))
tfutil.print_operation_value(sl_const2)
Tensor("Slice_1:0", shape=(1, 3), dtype=int32)
Tensor("Shape_3:0", shape=(2,), dtype=int32)
[[2 3 4]]
In [5]:
sl_const2 = tf.slice(const2, [0, 2], [1, 3])
print(sl_const2)
print(tf.shape(sl_const2))
tfutil.print_operation_value(sl_const2)
Tensor("Slice_2:0", shape=(1, 3), dtype=int32)
Tensor("Shape_4:0", shape=(2,), dtype=int32)
[[3 4 5]]
In [6]:
sl_const2 = tf.slice(const2, [1, 1], [1, 3])
print(sl_const2)
print(tf.shape(sl_const2))
tfutil.print_operation_value(sl_const2)
Tensor("Slice_3:0", shape=(1, 3), dtype=int32)
Tensor("Shape_5:0", shape=(2,), dtype=int32)
[[12 13 14]]
In [7]:
sl_const2 = tf.slice(const2, [1, 2], [1, 3])
print(sl_const2)
print(tf.shape(sl_const2))
tfutil.print_operation_value(sl_const2)
Tensor("Slice_4:0", shape=(1, 3), dtype=int32)
Tensor("Shape_6:0", shape=(2,), dtype=int32)
[[13 14 15]]
In [8]:
# [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]], [[13, 14, 15], [16, 17, 18]]]
const3 = tf.constant([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]], [[13, 14, 15], [16, 17, 18]]])
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(3, 2, 3), dtype=int32)
Tensor("Shape_7:0", shape=(3,), dtype=int32)
[[[ 1 2 3]
[ 4 5 6]]
[[ 7 8 9]
[10 11 12]]
[[13 14 15]
[16 17 18]]]
In [9]:
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_5:0", shape=(1, 1, 1), dtype=int32)
Tensor("Shape_8:0", shape=(3,), dtype=int32)
[[[1]]]
In [10]:
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_6:0", shape=(1, 1, 2), dtype=int32)
Tensor("Shape_9:0", shape=(3,), dtype=int32)
[[[1 2]]]
In [11]:
sl_const3 = tf.slice(const3, [0, 1, 0], [1, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_7:0", shape=(1, 1, 1), dtype=int32)
Tensor("Shape_10:0", shape=(3,), dtype=int32)
[[[4]]]
In [12]:
sl_const3 = tf.slice(const3, [0, 1, 0], [1, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_8:0", shape=(1, 1, 2), dtype=int32)
Tensor("Shape_11:0", shape=(3,), dtype=int32)
[[[4 5]]]
In [13]:
sl_const3 = tf.slice(const3, [1, 0, 0], [1, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_9:0", shape=(1, 1, 1), dtype=int32)
Tensor("Shape_12:0", shape=(3,), dtype=int32)
[[[7]]]
In [14]:
sl_const3 = tf.slice(const3, [1, 0, 0], [1, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_10:0", shape=(1, 1, 2), dtype=int32)
Tensor("Shape_13:0", shape=(3,), dtype=int32)
[[[7 8]]]
In [15]:
sl_const3 = tf.slice(const3, [1, 1, 0], [1, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_11:0", shape=(1, 1, 1), dtype=int32)
Tensor("Shape_14:0", shape=(3,), dtype=int32)
[[[10]]]
In [16]:
sl_const3 = tf.slice(const3, [1, 1, 0], [1, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_12:0", shape=(1, 1, 2), dtype=int32)
Tensor("Shape_15:0", shape=(3,), dtype=int32)
[[[10 11]]]
In [17]:
sl_const3 = tf.slice(const3, [2, 0, 0], [1, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_13:0", shape=(1, 1, 1), dtype=int32)
Tensor("Shape_16:0", shape=(3,), dtype=int32)
[[[13]]]
In [18]:
sl_const3 = tf.slice(const3, [2, 0, 0], [1, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_14:0", shape=(1, 1, 2), dtype=int32)
Tensor("Shape_17:0", shape=(3,), dtype=int32)
[[[13 14]]]
In [19]:
sl_const3 = tf.slice(const3, [2, 1, 0], [1, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_15:0", shape=(1, 1, 1), dtype=int32)
Tensor("Shape_18:0", shape=(3,), dtype=int32)
[[[16]]]
In [20]:
sl_const3 = tf.slice(const3, [2, 1, 0], [1, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_16:0", shape=(1, 1, 2), dtype=int32)
Tensor("Shape_19:0", shape=(3,), dtype=int32)
[[[16 17]]]
In [21]:
# [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]], [[13, 14, 15], [16, 17, 18]]]
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_17:0", shape=(1, 1, 1), dtype=int32)
Tensor("Shape_20:0", shape=(3,), dtype=int32)
[[[1]]]
In [22]:
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_18:0", shape=(1, 1, 2), dtype=int32)
Tensor("Shape_21:0", shape=(3,), dtype=int32)
[[[1 2]]]
In [23]:
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 1, 3])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_19:0", shape=(1, 1, 3), dtype=int32)
Tensor("Shape_22:0", shape=(3,), dtype=int32)
[[[1 2 3]]]
In [24]:
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 2, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_20:0", shape=(1, 2, 1), dtype=int32)
Tensor("Shape_23:0", shape=(3,), dtype=int32)
[[[1]
[4]]]
In [25]:
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 2, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_21:0", shape=(1, 2, 2), dtype=int32)
Tensor("Shape_24:0", shape=(3,), dtype=int32)
[[[1 2]
[4 5]]]
In [26]:
sl_const3 = tf.slice(const3, [0, 0, 0], [1, 2, 3])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_22:0", shape=(1, 2, 3), dtype=int32)
Tensor("Shape_25:0", shape=(3,), dtype=int32)
[[[1 2 3]
[4 5 6]]]
In [27]:
sl_const3 = tf.slice(const3, [0, 0, 0], [3, 1, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_23:0", shape=(3, 1, 1), dtype=int32)
Tensor("Shape_26:0", shape=(3,), dtype=int32)
[[[ 1]]
[[ 7]]
[[13]]]
In [28]:
sl_const3 = tf.slice(const3, [0, 0, 0], [3, 2, 1])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_24:0", shape=(3, 2, 1), dtype=int32)
Tensor("Shape_27:0", shape=(3,), dtype=int32)
[[[ 1]
[ 4]]
[[ 7]
[10]]
[[13]
[16]]]
In [29]:
sl_const3 = tf.slice(const3, [0, 0, 0], [3, 1, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_25:0", shape=(3, 1, 2), dtype=int32)
Tensor("Shape_28:0", shape=(3,), dtype=int32)
[[[ 1 2]]
[[ 7 8]]
[[13 14]]]
In [30]:
sl_const3 = tf.slice(const3, [0, 0, 0], [3, 2, 2])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_26:0", shape=(3, 2, 2), dtype=int32)
Tensor("Shape_29:0", shape=(3,), dtype=int32)
[[[ 1 2]
[ 4 5]]
[[ 7 8]
[10 11]]
[[13 14]
[16 17]]]
In [31]:
sl_const3 = tf.slice(const3, [0, 0, 0], [3, 1, 3])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_27:0", shape=(3, 1, 3), dtype=int32)
Tensor("Shape_30:0", shape=(3,), dtype=int32)
[[[ 1 2 3]]
[[ 7 8 9]]
[[13 14 15]]]
In [32]:
sl_const3 = tf.slice(const3, [0, 0, 0], [3, 2, 3])
print(sl_const3)
print(tf.shape(sl_const3))
tfutil.print_operation_value(sl_const3)
Tensor("Slice_28:0", shape=(3, 2, 3), dtype=int32)
Tensor("Shape_31:0", shape=(3,), dtype=int32)
[[[ 1 2 3]
[ 4 5 6]]
[[ 7 8 9]
[10 11 12]]
[[13 14 15]
[16 17 18]]]
tf.split(value, num_or_size_splits, axis=0, name='split')
Example:
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, 3, 1)
tf.shape(split0) ==> [5, 10]
Args:
value: The Tensor to split.
num_or_size_splits: A Python integer giving the number of equal splits along axis, or a 1-D list of sizes for each output tensor.
axis: A 0-D int32 Tensor. The dimension along which to split. Must be in the range [0, rank(value)).
name: A name for the operation. (optional)
Returns:
A list of Tensor objects resulting from splitting value.
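num_or_size_splits also accepts a list of sizes for unequal splits; a short sketch, assuming a TF 1.x graph environment (names are illustrative):
import tensorflow as tf
value = tf.placeholder(tf.float32, shape=[5, 30])
# Unequal split along axis 1; the sizes must sum to 30.
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
print(split0.shape)  # (5, 4)
print(split1.shape)  # (5, 15)
print(split2.shape)  # (5, 11)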
Source: Tensor Transformations
TensorFlow tf.split results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
# [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
const1 = tf.constant(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(10,), dtype=int32)
Tensor("Shape:0", shape=(1,), dtype=int32)
[ 1 2 3 4 5 6 7 8 9 10]
In [2]:
sp1_const1, sp2_const1 = tf.split(const1, 2, 0)
print(sp1_const1)
print(tf.shape(sp1_const1))
tfutil.print_operation_value(sp1_const1)
Tensor("split:0", shape=(5,), dtype=int32)
Tensor("Shape_1:0", shape=(1,), dtype=int32)
[1 2 3 4 5]
In [3]:
print(sp2_const1)
print(tf.shape(sp2_const1))
tfutil.print_operation_value(sp2_const1)
Tensor("split:1", shape=(5,), dtype=int32)
Tensor("Shape_2:0", shape=(1,), dtype=int32)
[ 6 7 8 9 10]
In [4]:
# [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]]
const2 = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]])
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_2:0", shape=(5, 4), dtype=int32)
Tensor("Shape_3:0", shape=(2,), dtype=int32)
[[ 1 2 3 4]
[ 5 6 7 8]
[ 9 10 11 12]
[13 14 15 16]
[17 18 19 20]]
In [5]:
sp1_const2, sp2_const2 = tf.split(const2, 2, 1)
print(sp1_const2)
print(tf.shape(sp1_const2))
tfutil.print_operation_value(sp1_const2)
Tensor("split_1:0", shape=(5, 2), dtype=int32)
Tensor("Shape_4:0", shape=(2,), dtype=int32)
[[ 1 2]
[ 5 6]
[ 9 10]
[13 14]
[17 18]]
In [6]:
print(sp2_const2)
print(tf.shape(sp2_const2))
tfutil.print_operation_value(sp2_const2)
Tensor("split_1:1", shape=(5, 2), dtype=int32)
Tensor("Shape_5:0", shape=(2,), dtype=int32)
[[ 3 4]
[ 7 8]
[11 12]
[15 16]
[19 20]]
In [7]:
# [5, 30]
var1 = tf.Variable(tf.random_normal([5, 30]))
print(var1)
print(tf.shape(var1))
<tf.Variable 'Variable:0' shape=(5, 30) dtype=float32_ref>
Tensor("Shape_6:0", shape=(2,), dtype=int32)
In [8]:
sp1_var1, sp2_var1, sp3_var1 = tf.split(var1, 3, 1)
print(sp1_var1)
print(tf.shape(sp1_var1))
Tensor("split_2:0", shape=(5, 10), dtype=float32)
Tensor("Shape_7:0", shape=(2,), dtype=int32)
In [9]:
print(sp2_var1)
print(tf.shape(sp2_var1))
Tensor("split_2:1", shape=(5, 10), dtype=float32)
Tensor("Shape_8:0", shape=(2,), dtype=int32)
In [10]:
print(sp3_var1)
print(tf.shape(sp3_var1))
Tensor("split_2:2", shape=(5, 10), dtype=float32)
Tensor("Shape_9:0", shape=(2,), dtype=int32)
tf.tile(input, multiples, name=None)
Args:
input: A Tensor. 1-D or higher.
multiples: An int32 Tensor. 1-D. Its length must equal the number of dimensions of input.
name: A name for the operation. (optional)
Returns:
A Tensor with the same type as input.
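tf.tile repeats input multiples[i] times along dimension i, matching NumPy's np.tile; a quick sketch of the correspondence:
import numpy as np
a = np.array([1, 2, 3])
print(np.tile(a, 2))       # [1 2 3 1 2 3], like tf.tile(a, [2])
b = np.array([[1, 2], [3, 4]])
print(np.tile(b, (2, 3)))  # rows repeated twice, columns three times:
                           # shape (4, 6), like tf.tile(b, [2, 3])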
Source: Tensor Transformations
TensorFlow tf.tile results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
# [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
const1 = tf.constant(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(10,), dtype=int32)
Tensor("Shape:0", shape=(1,), dtype=int32)
[ 1 2 3 4 5 6 7 8 9 10]
In [3]:
ti_const1 = tf.tile(const1, [1])
print(ti_const1)
print(tf.shape(ti_const1))
tfutil.print_operation_value(ti_const1)
Tensor("Tile:0", shape=(10,), dtype=int32)
Tensor("Shape_1:0", shape=(1,), dtype=int32)
[ 1 2 3 4 5 6 7 8 9 10]
In [4]:
ti_const1 = tf.tile(const1, [2])
print(ti_const1)
print(tf.shape(ti_const1))
tfutil.print_operation_value(ti_const1)
Tensor("Tile_1:0", shape=(20,), dtype=int32)
Tensor("Shape_2:0", shape=(1,), dtype=int32)
[ 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10]
In [5]:
ti_const1 = tf.tile(const1, [3])
print(ti_const1)
print(tf.shape(ti_const1))
tfutil.print_operation_value(ti_const1)
Tensor("Tile_2:0", shape=(30,), dtype=int32)
Tensor("Shape_3:0", shape=(1,), dtype=int32)
[ 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10]
tf.pad(tensor, paddings, mode='CONSTANT', name=None)
Example:
# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1], [2, 2]].
# The rank of 't' is 2.
pad(t, paddings, "CONSTANT") ==> [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0, 0],
[0, 0, 4, 5, 6, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
pad(t, paddings, "REFLECT") ==> [[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1]]
pad(t, paddings, "SYMMETRIC") ==> [[2, 1, 1, 2, 3, 3, 2],
[2, 1, 1, 2, 3, 3, 2],
[5, 4, 4, 5, 6, 6, 5],
[5, 4, 4, 5, 6, 6, 5]]
Args:
tensor: A Tensor.
paddings: An int32 Tensor.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: A name for the operation. (optional)
Returns:
A Tensor with the same type as tensor.
Raises:
ValueError: If mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
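The practical difference between REFLECT and SYMMETRIC is whether the edge value itself is repeated in the mirror. NumPy's np.pad has the same two modes, which gives a compact comparison:
import numpy as np
a = np.array([1, 2, 3])
print(np.pad(a, 2, mode='reflect'))    # [3 2 1 2 3 2 1], edge value not repeated
print(np.pad(a, 2, mode='symmetric'))  # [2 1 1 2 3 3 2], edge value repeated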
Source: Tensor Transformations
TensorFlow tf.pad results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
# [[1, 2, 3], [4, 5, 6]]
const1 = tf.constant(np.array([[1, 2, 3], [4, 5, 6]]), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(2, 3),
dtype=int32)
Tensor("Shape:0", shape=(2,),
dtype=int32)
[[1 2 3]
[4 5 6]]
In [3]:
paddings = [[0, 0], [0, 1]]
pad_const1 = tf.pad(const1, paddings, "CONSTANT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("Pad:0", shape=(2, 4), dtype=int32)
Tensor("Shape_1:0", shape=(2,), dtype=int32)
[[1 2 3 0]
[4 5 6 0]]
In [4]:
paddings = [[0, 0], [1, 0]]
pad_const1 = tf.pad(const1, paddings, "CONSTANT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("Pad_1:0", shape=(2, 4), dtype=int32)
Tensor("Shape_2:0", shape=(2,), dtype=int32)
[[0 1 2 3]
[0 4 5 6]]
In [5]:
paddings = [[0, 0], [1, 1]]
pad_const1 = tf.pad(const1, paddings, "CONSTANT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("Pad_2:0", shape=(2, 5), dtype=int32)
Tensor("Shape_3:0", shape=(2,), dtype=int32)
[[0 1 2 3 0]
[0 4 5 6 0]]
In [6]:
paddings = [[0, 1], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "CONSTANT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("Pad_3:0", shape=(3, 3), dtype=int32)
Tensor("Shape_4:0", shape=(2,), dtype=int32)
[[1 2 3]
[4 5 6]
[0 0 0]]
In [7]:
paddings = [[1, 0], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "CONSTANT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("Pad_4:0", shape=(3, 3), dtype=int32)
Tensor("Shape_5:0", shape=(2,), dtype=int32)
[[0 0 0]
[1 2 3]
[4 5 6]]
In [8]:
paddings = [[1, 1], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "CONSTANT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("Pad_5:0", shape=(4, 3), dtype=int32)
Tensor("Shape_6:0", shape=(2,), dtype=int32)
[[0 0 0]
[1 2 3]
[4 5 6]
[0 0 0]]
In [9]:
paddings = [[1, 1], [1, 1]]
pad_const1 = tf.pad(const1, paddings, "CONSTANT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("Pad_6:0", shape=(4, 5), dtype=int32)
Tensor("Shape_7:0", shape=(2,), dtype=int32)
[[0 0 0 0 0]
[0 1 2 3 0]
[0 4 5 6 0]
[0 0 0 0 0]]
In [10]:
# [[1, 2, 3], [4, 5, 6]]
paddings = [[0, 0], [0, 1]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad:0", shape=(2, 4), dtype=int32)
Tensor("Shape_8:0", shape=(2,), dtype=int32)
[[1 2 3 2]
[4 5 6 5]]
In [11]:
paddings = [[0, 0], [1, 0]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_1:0", shape=(2, 4), dtype=int32)
Tensor("Shape_9:0", shape=(2,), dtype=int32)
[[2 1 2 3]
[5 4 5 6]]
In [12]:
paddings = [[0, 0], [1, 1]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_2:0", shape=(2, 5), dtype=int32)
Tensor("Shape_10:0", shape=(2,), dtype=int32)
[[2 1 2 3 2]
[5 4 5 6 5]]
In [13]:
paddings = [[0, 1], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_3:0", shape=(3, 3), dtype=int32)
Tensor("Shape_11:0", shape=(2,), dtype=int32)
[[1 2 3]
[4 5 6]
[1 2 3]]
In [14]:
paddings = [[1, 0], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_4:0", shape=(3, 3), dtype=int32)
Tensor("Shape_12:0", shape=(2,), dtype=int32)
[[4 5 6]
[1 2 3]
[4 5 6]]
In [15]:
paddings = [[1, 1], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_5:0", shape=(4, 3), dtype=int32)
Tensor("Shape_13:0", shape=(2,), dtype=int32)
[[4 5 6]
[1 2 3]
[4 5 6]
[1 2 3]]
In [16]:
paddings = [[1, 1], [1, 1]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_6:0", shape=(4, 5), dtype=int32)
Tensor("Shape_14:0", shape=(2,), dtype=int32)
[[5 4 5 6 5]
[2 1 2 3 2]
[5 4 5 6 5]
[2 1 2 3 2]]
In [17]:
paddings = [[1, 1], [2, 2]]
pad_const1 = tf.pad(const1, paddings, "REFLECT")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_7:0", shape=(4, 7), dtype=int32)
Tensor("Shape_15:0", shape=(2,), dtype=int32)
[[6 5 4 5 6 5 4]
[3 2 1 2 3 2 1]
[6 5 4 5 6 5 4]
[3 2 1 2 3 2 1]]
In [18]:
# [[1, 2, 3], [4, 5, 6]]
paddings = [[0, 0], [0, 1]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_8:0", shape=(2, 4), dtype=int32)
Tensor("Shape_16:0", shape=(2,), dtype=int32)
[[1 2 3 3]
[4 5 6 6]]
In [19]:
paddings = [[0, 0], [1, 0]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_9:0", shape=(2, 4), dtype=int32)
Tensor("Shape_17:0", shape=(2,), dtype=int32)
[[1 1 2 3]
[4 4 5 6]]
In [20]:
paddings = [[0, 0], [1, 1]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_10:0", shape=(2, 5), dtype=int32)
Tensor("Shape_18:0", shape=(2,), dtype=int32)
[[1 1 2 3 3]
[4 4 5 6 6]]
In [21]:
paddings = [[0, 1], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_11:0", shape=(3, 3), dtype=int32)
Tensor("Shape_19:0", shape=(2,), dtype=int32)
[[1 2 3]
[4 5 6]
[4 5 6]]
In [22]:
paddings = [[1, 0], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_12:0", shape=(3, 3), dtype=int32)
Tensor("Shape_20:0", shape=(2,), dtype=int32)
[[1 2 3]
[1 2 3]
[4 5 6]]
In [23]:
paddings = [[1, 1], [0, 0]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_13:0", shape=(4, 3), dtype=int32)
Tensor("Shape_21:0", shape=(2,), dtype=int32)
[[1 2 3]
[1 2 3]
[4 5 6]
[4 5 6]]
In [24]:
paddings = [[1, 1], [1, 1]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_14:0", shape=(4, 5), dtype=int32)
Tensor("Shape_22:0", shape=(2,), dtype=int32)
[[1 1 2 3 3]
[1 1 2 3 3]
[4 4 5 6 6]
[4 4 5 6 6]]
In [25]:
paddings = [[1, 1], [2, 2]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_15:0", shape=(4, 7), dtype=int32)
Tensor("Shape_23:0", shape=(2,), dtype=int32)
[[2 1 1 2 3 3 2]
[2 1 1 2 3 3 2]
[5 4 4 5 6 6 5]
[5 4 4 5 6 6 5]]
In [26]:
paddings = [[1, 1], [3, 3]]
pad_const1 = tf.pad(const1, paddings, "SYMMETRIC")
print(pad_const1)
print(tf.shape(pad_const1))
tfutil.print_operation_value(pad_const1)
Tensor("MirrorPad_16:0", shape=(4, 9), dtype=int32)
Tensor("Shape_24:0", shape=(2,), dtype=int32)
[[3 2 1 1 2 3 3 2 1]
[3 2 1 1 2 3 3 2 1]
[6 5 4 4 5 6 6 5 4]
[6 5 4 4 5 6 6 5 4]]
tf.concat(values, axis, name='concat')
Example:
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 has shape [2, 3]
# tensor t4 has shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) ==> [4, 3]
tf.shape(tf.concat([t3, t4], 1)) ==> [2, 6]
Args:
values: A list of Tensor objects, or a single Tensor.
axis: A 0-D int32 Tensor. The dimension along which to concatenate.
name: A name for the operation. (optional)
Returns:
A Tensor resulting from the concatenation of the input tensors.
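The shape rule is the same as NumPy's np.concatenate: all inputs must match on every dimension except axis, and the output's size along axis is the sum of the inputs' sizes there. A quick sketch:
import numpy as np
t1 = np.array([[1, 2, 3], [4, 5, 6]])
t2 = np.array([[7, 8, 9], [10, 11, 12]])
print(np.concatenate([t1, t2], axis=0).shape)  # (4, 3), like tf.concat([t1, t2], 0)
print(np.concatenate([t1, t2], axis=1).shape)  # (2, 6), like tf.concat([t1, t2], 1)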
Source: Tensor Transformations
TensorFlow tf.concat results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
# [[1, 2, 3], [4, 5, 6]]
const1 = tf.constant(np.array([[1, 2, 3], [4, 5, 6]]), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(2, 3),
dtype=int32)
Tensor("Shape:0", shape=(2,),
dtype=int32)
[[1 2 3]
[4 5 6]]
In [3]:
# [[7, 8, 9], [10, 11, 12]]
const2 = tf.constant(np.array([[7, 8, 9], [10, 11, 12]]), dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(2, 3),
dtype=int32)
Tensor("Shape_1:0", shape=(2,),
dtype=int32)
[[ 7
8 9]
[10 11
12]]
In [4]:
cc_const1 = tf.concat([const1,
const2], 0)
print(cc_const1)
print(tf.shape(cc_const1))
tfutil.print_operation_value(cc_const1)
Tensor("concat:0", shape=(4, 3),
dtype=int32)
Tensor("Shape_2:0", shape=(2,),
dtype=int32)
[[ 1
2 3]
[ 4 5 6]
[ 7 8 9]
[10 11
12]]
In [5]:
cc_const1 = tf.concat([const1,
const2], 1)
print(cc_const1)
print(tf.shape(cc_const1))
tfutil.print_operation_value(cc_const1)
Tensor("concat_1:0", shape=(2, 6),
dtype=int32)
Tensor("Shape_3:0", shape=(2,),
dtype=int32)
[[ 1
2 3 7
8 9]
[ 4 5 6 10
11 12]]
tf.stack(values, axis=0, name='stack')
See the guides: Layers (contrib) > Higher level ops for building neural network layers, Tensor Transformations > Slicing and Joining
Stacks a list of rank-R tensors into one rank-(R+1) tensor.
Packs the list of tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of length N of tensors of shape (A, B, C):
if axis == 0 then the output tensor will have the shape (N, A, B, C).
if axis == 1 then the output tensor will have the shape (A, N, B, C). Etc.
For example:
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
This is the opposite of unstack. The numpy equivalent is
tf.stack([x, y, z]) = np.stack([x, y, z])
Args:
values: A list of Tensor objects with the same shape and type.
axis: An int. The axis to stack along. Defaults to the first dimension. Negative values wrap around, so the valid range is [-(R+1), R+1).
name: A name for this operation (optional).
Returns:
output: A stacked Tensor with the same type as values.
Raises:
ValueError: If axis is out of the range [-(R+1), R+1).
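Stacking is equivalent to giving each tensor a new unit axis and concatenating along it; a NumPy sketch of that identity:
import numpy as np
x, y, z = np.array([1, 4]), np.array([2, 5]), np.array([3, 6])
s = np.stack([x, y, z])                                    # shape (3, 2)
c = np.concatenate([a[np.newaxis, :] for a in (x, y, z)])  # same values
print(np.array_equal(s, c))  # True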
Source: API r1.4
TensorFlow tf.stack results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
# Three 1-D constants: [1, 2, 3], [4, 5, 6], [7, 8, 9]
const1 = tf.constant(np.array([1, 2, 3]), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(3,),
dtype=int32)
Tensor("Shape:0", shape=(1,),
dtype=int32)
[1 2 3]
In [3]:
const2 = tf.constant(np.array([4, 5, 6]), dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(3,),
dtype=int32)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
[4 5 6]
In [4]:
const3 = tf.constant(np.array([7, 8, 9]), dtype=tf.int32)
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(3,),
dtype=int32)
Tensor("Shape_2:0", shape=(1,),
dtype=int32)
[7 8 9]
In [5]:
pack_const1 = tf.stack([const1, const2, const3])
print(pack_const1)
print(tf.shape(pack_const1))
tfutil.print_operation_value(pack_const1)
Tensor("stack:0", shape=(3, 3), dtype=int32)
Tensor("Shape_3:0", shape=(2,), dtype=int32)
[[1 2 3]
[4 5 6]
[7 8 9]]
In [6]:
pack_const1 = tf.stack([const1, const2, const3], axis=1)
print(pack_const1)
print(tf.shape(pack_const1))
tfutil.print_operation_value(pack_const1)
Tensor("stack_1:0", shape=(3, 3), dtype=int32)
Tensor("Shape_4:0", shape=(2,), dtype=int32)
[[1 4 7]
[2 5 8]
[3 6 9]]
tf.unstack(value, num=None, axis=0, name='unstack')
Defined in tensorflow/python/ops/array_ops.py.
See the guide: Tensor Transformations > Slicing and Joining
Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
Unpacks num tensors from value by chipping it along the axis dimension. If num is not specified (the default), it is inferred from value's shape. If value.shape[axis] is not known, ValueError is raised.
For example, given a tensor of shape (A, B, C, D):
If axis == 0 then the i'th tensor in output is the slice value[i, :, :, :] and each tensor in output will have shape (B, C, D). (Note that the dimension unpacked along is gone, unlike split).
If axis == 1 then the i'th tensor in output is the slice value[:, i, :, :] and each tensor in output will have shape (A, C, D). Etc.
This is the opposite of stack. The NumPy near-equivalent is list(x), which yields the slices along the first axis.
Args:
value: A rank R > 0 Tensor to be unstacked.
num: An int. The length of the dimension axis. Automatically inferred if None (the default).
axis: An int. The axis to unstack along. Defaults to the first dimension. Negative values wrap around, so the valid range is [-R, R).
name: A name for the operation (optional).
Returns:
The list of Tensor objects unstacked from value.
Raises:
ValueError: If num is unspecified and cannot be inferred.
ValueError: If axis is out of the range [-R, R).
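In NumPy terms, unstacking is just taking every slice along one axis; a small sketch:
import numpy as np
m = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
rows = [m[i, :] for i in range(m.shape[0])]  # like tf.unstack(m, axis=0)
cols = [m[:, j] for j in range(m.shape[1])]  # like tf.unstack(m, axis=1)
print(rows[0], cols[0])  # [1 2 3] [1 4 7]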
Source: Tensor Transformations
TensorFlow tf.unstack results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
const1 = tf.constant(np.array([1, 2, 3]), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(3,),
dtype=int32)
Tensor("Shape:0", shape=(1,),
dtype=int32)
[1 2 3]
In [3]:
const2 = tf.constant(np.array([4, 5, 6]), dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(3,),
dtype=int32)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
[4 5 6]
In [4]:
const3 = tf.constant(np.array([7, 8, 9]), dtype=tf.int32)
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(3,),
dtype=int32)
Tensor("Shape_2:0", shape=(1,),
dtype=int32)
[7 8 9]
In [5]:
pack_const1 = tf.stack([const1, const2, const3])
print(pack_const1)
print(tf.shape(pack_const1))
tfutil.print_operation_value(pack_const1)
Tensor("stack:0", shape=(3, 3), dtype=int32)
Tensor("Shape_3:0", shape=(2,), dtype=int32)
[[1 2 3]
[4 5 6]
[7 8 9]]
In [6]:
up_const1, up_const2, up_const3 = tf.unstack(pack_const1)
print(up_const1)
print(tf.shape(up_const1))
tfutil.print_constant(up_const1)
Tensor("unstack:0", shape=(3,),
dtype=int32)
Tensor("Shape_4:0", shape=(1,),
dtype=int32)
[1 2 3]
In [7]:
print(up_const2)
print(tf.shape(up_const2))
tfutil.print_constant(up_const2)
Tensor("unstack:1", shape=(3,),
dtype=int32)
Tensor("Shape_5:0", shape=(1,),
dtype=int32)
[4 5 6]
In [8]:
print(up_const3)
print(tf.shape(up_const3))
tfutil.print_constant(up_const3)
Tensor("unstack:2", shape=(3,),
dtype=int32)
Tensor("Shape_6:0", shape=(1,),
dtype=int32)
[7 8 9]
In [9]:
pack_const1 = tf.stack([const1, const2, const3], axis=1)
print(pack_const1)
print(tf.shape(pack_const1))
tfutil.print_operation_value(pack_const1)
Tensor("stack_1:0", shape=(3, 3), dtype=int32)
Tensor("Shape_7:0", shape=(2,), dtype=int32)
[[1 4 7]
[2 5 8]
[3 6 9]]
In [10]:
up_const1, up_const2, up_const3 = tf.unstack(pack_const1)
print(up_const1)
print(tf.shape(up_const1))
tfutil.print_constant(up_const1)
Tensor("unstack_1:0", shape=(3,),
dtype=int32)
Tensor("Shape_8:0", shape=(1,),
dtype=int32)
[1 4 7]
In [11]:
print(up_const2)
print(tf.shape(up_const2))
tfutil.print_constant(up_const2)
Tensor("unstack_1:1", shape=(3,),
dtype=int32)
Tensor("Shape_9:0", shape=(1,),
dtype=int32)
[2 5 8]
In [12]:
print(up_const3)
print(tf.shape(up_const3))
tfutil.print_constant(up_const3)
Tensor("unstack_1:2", shape=(3,),
dtype=int32)
Tensor("Shape_10:0", shape=(1,),
dtype=int32)
[3 6 9]
tf.reverse_sequence(input, seq_lengths, seq_dim, batch_dim=None, name=None)
Example:
# Given this setup:
batch_dim = 0
seq_dim = 1
input.dims = (4, 8, ...)
seq_lengths = [7, 2, 3, 5]
# Each slice of the input tensor is reversed along seq_dim up to seq_lengths:
output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
# The parts past seq_lengths are copied through unchanged:
output[0, 7:, :, ...] = input[0, 7:, :, ...]
output[1, 2:, :, ...] = input[1, 2:, :, ...]
output[2, 3:, :, ...] = input[2, 3:, :, ...]
output[3, 5:, :, ...] = input[3, 5:, :, ...]
Args:
input: A Tensor. The tensor to be partially reversed.
seq_lengths: A 1-D int64 Tensor with length input.dims(batch_dim), satisfying max(seq_lengths) < input.dims(seq_dim).
seq_dim: An int. The dimension which is (partially) reversed.
batch_dim: An int. The dimension along which reversal is performed. Defaults to 0. (optional)
name: A name for the operation. (optional)
Returns:
A Tensor with the same type and shape as input.
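A minimal NumPy sketch of the semantics (reverse_sequence here is an illustrative helper, not the TensorFlow API): for each batch slice i, the first seq_lengths[i] elements along the sequence axis are reversed and the rest are copied through.
import numpy as np
def reverse_sequence(x, seq_lengths, seq_axis=1, batch_axis=0):
    out = x.copy()
    for i, n in enumerate(seq_lengths):
        dst = [slice(None)] * x.ndim
        dst[batch_axis] = i
        dst[seq_axis] = slice(0, n)             # first n elements...
        src = list(dst)
        src[seq_axis] = slice(n - 1, None, -1)  # ...taken in reverse order
        out[tuple(dst)] = x[tuple(src)]
    return out
x = np.array([[1, 2, 3, 4, 0, 0, 0],
              [1, 2, 3, 0, 0, 0, 0],
              [1, 2, 3, 4, 5, 6, 7]])
print(reverse_sequence(x, [4, 3, 7]))  # matches the tf.reverse_sequence cell below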
Source: Tensor Transformations
TensorFlow tf.reverse_sequence results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[1, 2, 3, 4, 0, 0, 0],
[1, 2, 3, 0, 0, 0, 0],
[1, 2, 3, 4, 5, 6, 7]]
const1 = tf.constant(np.array(x), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(3, 7), dtype=int32)
Tensor("Shape:0", shape=(2,), dtype=int32)
[[1 2 3 4 0 0 0]
[1 2 3 0 0 0 0]
[1 2 3 4 5 6 7]]
In [3]:
seq_lens = [4, 3, 7]
rs_const1 = tf.reverse_sequence(const1, seq_lens, seq_dim=1, batch_dim=0)
print(rs_const1)
print(tf.shape(rs_const1))
tfutil.print_operation_value(rs_const1)
Tensor("ReverseSequence:0", shape=(3, 7), dtype=int32)
Tensor("Shape_1:0", shape=(2,), dtype=int32)
[[4 3 2 1 0 0 0]
[3 2 1 0 0 0 0]
[7 6 5 4 3 2 1]]
In [4]:
x = [[[1, 2, 3, 4, 0, 0, 0], [1, 0, 2, 0, 0, 0, 0]],
[[1, 2, 3, 4, 5, 0, 0], [0, 2, 1, 4, 0, 0, 0]],
[[1, 2, 3, 4, 5, 6 ,7], [1, 2, 3, 4, 0, 6, 0]]]
const2 = tf.constant(np.array(x), dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(3, 2, 7), dtype=int32)
Tensor("Shape_2:0", shape=(3,), dtype=int32)
[[[1 2 3 4 0 0 0]
[1 0 2 0 0 0 0]]
[[1 2 3 4 5 0 0]
[0 2 1 4 0 0 0]]
[[1 2 3 4 5 6 7]
[1 2 3 4 0 6 0]]]
In [5]:
seq_lens = [4, 3, 5]
rs_const2 = tf.reverse_sequence(const2, seq_lens, seq_dim=2, batch_dim=0)
print(rs_const2)
print(tf.shape(rs_const2))
tfutil.print_operation_value(rs_const2)
Tensor("ReverseSequence_1:0", shape=(3, 2, 7), dtype=int32)
Tensor("Shape_3:0", shape=(3,), dtype=int32)
[[[4 3 2 1 0 0 0]
[0 2 0 1 0 0 0]]
[[3 2 1 4 5 0 0]
[1 2 0 4 0 0 0]]
[[5 4 3 2 1 6 7]
[0 4 3 2 1 6 0]]]
In [6]:
seq_lens = [4, 3]
rs_const2 = tf.reverse_sequence(const2, seq_lens, seq_dim=2, batch_dim=1)
print(rs_const2)
print(tf.shape(rs_const2))
tfutil.print_operation_value(rs_const2)
Tensor("ReverseSequence_2:0", shape=(3, 2, 7), dtype=int32)
Tensor("Shape_4:0", shape=(3,), dtype=int32)
[[[4 3 2 1 0 0 0]
[2 0 1 0 0 0 0]]
[[4 3 2 1 5 0 0]
[1 2 0 4 0 0 0]]
[[4 3 2 1 5 6 7]
[3 2 1 4 0 6 0]]]
tf.reverse(tensor, dims, name=None)
Example:
# tensor 't' is [[[[ 0,  1,  2,  3],
#                  [ 4,  5,  6,  7],
#                  [ 8,  9, 10, 11]],
#                 [[12, 13, 14, 15],
#                  [16, 17, 18, 19],
#                  [20, 21, 22, 23]]]]
# tensor 't' has shape [1, 2, 3, 4]
# When 'dims' is [3]
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]
# When 'dims' is [1]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]
# When 'dims' is [2]
reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
                        [ 4,  5,  6,  7],
                        [ 0,  1,  2,  3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
Args:
tensor: A Tensor. Must be one of the following types: uint8, int8, int32, bool, half, float32, float64. Up to 8-D.
dims: A 1-D int32 Tensor. The indices of the dimensions to reverse, as in the examples above and in the ReverseV2 cells below.
name: A name for the operation. (optional)
Returns:
A Tensor with the same type and shape as tensor.
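NumPy's np.flip is the direct analogue; a quick sketch:
import numpy as np
t = np.arange(24).reshape(1, 2, 3, 4)
print(np.array_equal(np.flip(t, axis=3), t[..., ::-1]))  # True, like tf.reverse(t, [3])
print(np.flip(t, axis=1)[0, 0, 0])                       # [12 13 14 15], like tf.reverse(t, [1])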
Source: Tensor Transformations
TensorFlow tf.reverse results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]]
const1 = tf.constant(np.array(x), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(1, 2, 3, 4), dtype=int32)
Tensor("Shape:0", shape=(4,), dtype=int32)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
In [3]:
dims = [3]
dims_const1 = tf.reverse(const1, dims)
print(dims_const1)
print(tf.shape(dims_const1))
tfutil.print_operation_value(dims_const1)
Tensor("ReverseV2:0", shape=(1, 2, 3, 4), dtype=int32)
Tensor("Shape_1:0", shape=(4,), dtype=int32)
[[[[ 3 2 1 0]
[ 7 6 5 4]
[11 10 9 8]]
[[15 14 13 12]
[19 18 17 16]
[23 22 21 20]]]]
In [4]:
dims = [1]
dims_const1 = tf.reverse(const1, dims)
print(dims_const1)
print(tf.shape(dims_const1))
tfutil.print_operation_value(dims_const1)
Tensor("ReverseV2_1:0", shape=(1, 2, 3, 4), dtype=int32)
Tensor("Shape_2:0", shape=(4,), dtype=int32)
[[[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]]]
In [5]:
dims = [2]
dims_const1 = tf.reverse(const1, dims)
print(dims_const1)
print(tf.shape(dims_const1))
tfutil.print_operation_value(dims_const1)
Tensor("ReverseV2_2:0", shape=(1, 2, 3, 4), dtype=int32)
Tensor("Shape_3:0", shape=(4,), dtype=int32)
[[[[ 8 9 10 11]
[ 4 5 6 7]
[ 0 1 2 3]]
[[20 21 22 23]
[16 17 18 19]
[12 13 14 15]]]]
tf.transpose(a, perm=None, name='transpose')
Example:
# 'x' is [[1 2 3]
#         [4 5 6]]
tf.transpose(x) ==> [[1 4]
                     [2 5]
                     [3 6]]
# Equivalent to the default value of perm
tf.transpose(x, perm=[1, 0]) ==> [[1 4]
                                  [2 5]
                                  [3 6]]
# 'perm' is more useful for tensors of rank n > 2
# 'x' is [[[1 2 3]
#          [4 5 6]]
#         [[7 8 9]
#          [10 11 12]]]
# Take the transpose of the matrices in dimension 0
tf.transpose(x, perm=[0, 2, 1]) ==> [[[1 4]
                                      [2 5]
                                      [3 6]]
                                     [[7 10]
                                      [8 11]
                                      [9 12]]]
Args:
a: A Tensor.
perm: A permutation of the dimensions of a.
name: A name for the operation. (optional)
Returns:
A transposed Tensor.
Source: Tensor Transformations
TensorFlow tf.transpose results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
'''
For A with shape [2, 3, 4], perm (1, 0, 2) gives B with shape [3, 2, 4]:
Index = (0, 1, 2)
A     = [2, 3, 4]
Perm  = (1, 0, 2)
B     = (3, 2, 4)  --> output dimension i is input dimension perm[i]:
perm[0]=1 picks 3, perm[1]=0 picks 2, perm[2]=2 picks 4, so B is (3, 2, 4).
'''
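The same rule in NumPy, which tf.transpose mirrors:
import numpy as np
A = np.zeros((2, 3, 4))
B = np.transpose(A, (1, 0, 2))  # like tf.transpose(A, perm=[1, 0, 2])
print(B.shape)                  # (3, 2, 4): output dim i is input dim perm[i]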
In [3]:
x = [[1, 2, 3, 4, 5, 6]]
const1 = tf.constant(np.array(x), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(1, 6), dtype=int32)
Tensor("Shape:0", shape=(2,), dtype=int32)
[[1 2 3 4 5 6]]
In [4]:
perm = [1, 0]
trans_const1 = tf.transpose(const1, perm)
print(trans_const1)
print(tf.shape(trans_const1))
tfutil.print_operation_value(trans_const1)
Tensor("transpose:0", shape=(6, 1), dtype=int32)
Tensor("Shape_1:0", shape=(2,), dtype=int32)
[[1]
[2]
[3]
[4]
[5]
[6]]
In [5]:
perm = [0, 1]
trans_const1 = tf.transpose(const1, perm)
print(trans_const1)
print(tf.shape(trans_const1))
tfutil.print_operation_value(trans_const1)
Tensor("transpose_1:0", shape=(1, 6), dtype=int32)
Tensor("Shape_2:0", shape=(2,), dtype=int32)
[[1 2 3 4 5 6]]
In [6]:
x = [[1, 2, 3], [4, 5, 6]]
const2 = tf.constant(np.array(x), dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(2, 3), dtype=int32)
Tensor("Shape_3:0", shape=(2,), dtype=int32)
[[1 2 3]
[4 5 6]]
In [7]:
perm = [1, 0]
trans_const2 = tf.transpose(const2, perm)
print(trans_const2)
print(tf.shape(trans_const2))
tfutil.print_operation_value(trans_const2)
Tensor("transpose_2:0", shape=(3, 2), dtype=int32)
Tensor("Shape_4:0", shape=(2,), dtype=int32)
[[1 4]
[2 5]
[3 6]]
In [8]:
perm = [0, 1]
trans_const2 = tf.transpose(const2, perm)
print(trans_const2)
print(tf.shape(trans_const2))
tfutil.print_operation_value(trans_const2)
Tensor("transpose_3:0", shape=(2, 3), dtype=int32)
Tensor("Shape_5:0", shape=(2,), dtype=int32)
[[1 2 3]
[4 5 6]]
In [9]:
x = [[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]]
const3 = tf.constant(np.array(x), dtype=tf.int32)
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(2, 2, 3), dtype=int32)
Tensor("Shape_6:0", shape=(3,), dtype=int32)
[[[ 1 2 3]
[ 4 5 6]]
[[ 7 8 9]
[10 11 12]]]
In [10]:
perm = [1, 0, 2]
trans_const3 = tf.transpose(const3, perm)
print(trans_const3)
print(tf.shape(trans_const3))
tfutil.print_operation_value(trans_const3)
Tensor("transpose_4:0", shape=(2, 2, 3), dtype=int32)
Tensor("Shape_7:0", shape=(3,), dtype=int32)
[[[ 1 2 3]
[ 7 8 9]]
[[ 4 5 6]
[10 11 12]]]
In [11]:
perm = [2, 0, 1]
trans_const3 = tf.transpose(const3, perm)
print(trans_const3)
print(tf.shape(trans_const3))
tfutil.print_operation_value(trans_const3)
Tensor("transpose_5:0", shape=(3, 2, 2), dtype=int32)
Tensor("Shape_8:0", shape=(3,), dtype=int32)
[[[ 1 4]
[ 7 10]]
[[ 2 5]
[ 8 11]]
[[ 3 6]
[ 9 12]]]
In [12]:
perm = [2, 1, 0]
trans_const3 = tf.transpose(const3, perm)
print(trans_const3)
print(tf.shape(trans_const3))
tfutil.print_operation_value(trans_const3)
Tensor("transpose_6:0", shape=(3, 2, 2), dtype=int32)
Tensor("Shape_9:0", shape=(3,), dtype=int32)
[[[ 1 7]
[ 4 10]]
[[ 2 8]
[ 5 11]]
[[ 3 9]
[ 6 12]]]
In [13]:
perm = [1, 2, 0]
trans_const3 = tf.transpose(const3, perm)
print(trans_const3)
print(tf.shape(trans_const3))
tfutil.print_operation_value(trans_const3)
Tensor("transpose_7:0", shape=(2, 3, 2), dtype=int32)
Tensor("Shape_10:0", shape=(3,), dtype=int32)
[[[ 1 7]
[ 2 8]
[ 3 9]]
[[ 4 10]
[ 5 11]
[ 6 12]]]
In [14]:
perm = [0, 2, 1]
trans_const3 = tf.transpose(const3, perm)
print(trans_const3)
print(tf.shape(trans_const3))
tfutil.print_operation_value(trans_const3)
Tensor("transpose_8:0", shape=(2, 3, 2), dtype=int32)
Tensor("Shape_11:0", shape=(3,), dtype=int32)
[[[ 1 4]
[ 2 5]
[ 3 6]]
[[ 7 10]
[ 8 11]
[ 9 12]]]
In [15]:
perm = [0, 1, 2]
trans_const3 = tf.transpose(const3, perm)
print(trans_const3)
print(tf.shape(trans_const3))
tfutil.print_operation_value(trans_const3)
Tensor("transpose_9:0", shape=(2, 2, 3), dtype=int32)
Tensor("Shape_12:0", shape=(3,), dtype=int32)
[[[ 1 2 3]
[ 4 5 6]]
[[ 7 8 9]
[10 11 12]]]
tf.extract_image_patches(images, ksizes, strides, rates, padding, name=None)
Args:
images: A Tensor. Must be one of the following types: float32, float64, int32, int64, uint8, int16, int8, uint16, half. A 4-D tensor with shape [batch, in_rows, in_cols, depth].
ksizes: A list of ints of length 4. The size of the sliding window for each dimension of images.
strides: A list of ints of length 4. How far the centers of two consecutive patches are in the images. Must be of the form [1, stride_rows, stride_cols, 1].
rates: A list of ints of length 4. The input stride, specifying how far apart two consecutive patch samples are in the input. Must be of the form [1, rate_rows, rate_cols, 1]. Equivalent to extracting patches with an effective size of patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1), followed by subsampling them spatially by a factor of rates.
padding: The type of padding algorithm to use, "SAME" or "VALID".
name: A name for the operation. (optional)
Returns:
A 4-D Tensor with shape [batch, out_rows, out_cols, ksize_rows * ksize_cols * depth].
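For padding='VALID', the output spatial size follows the usual convolution arithmetic with the dilated (effective) patch size; a sketch of the formulas the cells below verify (patches_out_shape is an illustrative helper):
def patches_out_shape(in_rows, in_cols, depth, ksizes, strides, rates):
    _, kr, kc, _ = ksizes
    _, sr, sc, _ = strides
    _, rr, rc, _ = rates
    eff_r = kr + (kr - 1) * (rr - 1)  # effective patch height after dilation
    eff_c = kc + (kc - 1) * (rc - 1)  # effective patch width after dilation
    out_rows = (in_rows - eff_r) // sr + 1
    out_cols = (in_cols - eff_c) // sc + 1
    return (1, out_rows, out_cols, kr * kc * depth)
print(patches_out_shape(10, 10, 1, [1, 3, 3, 1], [1, 5, 5, 1], [1, 1, 1, 1]))  # (1, 2, 2, 9)
print(patches_out_shape(10, 10, 1, [1, 3, 3, 1], [1, 5, 5, 1], [1, 2, 2, 1]))  # (1, 2, 2, 9)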
Source: Tensor Transformations
TensorFlow tf.extract_image_patches results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
n = 10
# images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100 in order
images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]
const1 = tf.constant(np.array(images), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(1, 10, 10, 1), dtype=int32)
Tensor("Shape:0", shape=(4,), dtype=int32)
[[[[ 1]
[ 2]
[ 3]
[ 4]
[ 5]
[ 6]
[ 7]
[ 8]
[ 9]
[ 10]]
[[ 11]
[ 12]
[ 13]
[ 14]
[ 15]
[ 16]
[ 17]
[ 18]
[ 19]
[ 20]]
……………
[[ 91]
[ 92]
[ 93]
[ 94]
[ 95]
[ 96]
[ 97]
[ 98]
[ 99]
[100]]]]
In [3]:
# ksizes test
# output_depth = ksize_rows * ksize_cols * depth = (1 x 1 x 1) = 1
# each patch is a single value, e.g. [1]
ksizes = [1, 1, 1, 1]
strides = [1, 3, 3, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches:0", shape=(1, 4, 4, 1), dtype=int32)
Tensor("Shape_1:0", shape=(4,), dtype=int32)
[[[[ 1]
[ 4]
[ 7]
[ 10]]
[[ 31]
[ 34]
[ 37]
[ 40]]
[[ 61]
[ 64]
[ 67]
[ 70]]
[[ 91]
[ 94]
[ 97]
[100]]]]
In [4]:
# ksizes test
# output_depth = ksize_rows * ksize_cols * depth = (1 x 2 x 1) = 2
# first patch: [1 2]
ksizes = [1, 1, 2, 1]
strides = [1, 3, 3, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_1:0", shape=(1, 4, 3, 2), dtype=int32)
Tensor("Shape_2:0", shape=(4,), dtype=int32)
[[[[ 1 2]
[ 4 5]
[ 7 8]]
[[31 32]
[34 35]
[37 38]]
[[61 62]
[64 65]
[67 68]]
[[91 92]
[94 95]
[97 98]]]]
In [5]:
# ksizes test
# output_depth = ksize_rows * ksize_cols * depth = (1 x 3 x 1) = 3
# first patch: [1 2 3]
ksizes = [1, 1, 3, 1]
strides = [1, 3, 3, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_2:0", shape=(1, 4, 3, 3), dtype=int32)
Tensor("Shape_3:0", shape=(4,), dtype=int32)
[[[[ 1 2 3]
[ 4 5 6]
[ 7 8 9]]
[[31 32 33]
[34 35 36]
[37 38 39]]
[[61 62 63]
[64 65 66]
[67 68 69]]
[[91 92 93]
[94 95 96]
[97 98 99]]]]
In [6]:
# ksizes test
# output_depth = ksize_rows * ksize_cols * depth = (2 x 3 x 1) = 6
# first patch: [1 2 3 11 12 13]
ksizes = [1, 2, 3, 1]
strides = [1, 3, 3, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_3:0", shape=(1, 3, 3, 6), dtype=int32)
Tensor("Shape_4:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 11 12 13]
[ 4 5 6 14 15 16]
[ 7 8 9 17 18 19]]
[[31 32 33 41 42 43]
[34 35 36 44 45 46]
[37 38 39 47 48 49]]
[[61 62 63 71 72 73]
[64 65 66 74 75 76]
[67 68 69 77 78 79]]]]
In [7]:
# ksizes test
# output_depth = ksize_rows * ksize_cols * depth = (3 x 3 x 1) = 9
# first patch: [1 2 3 11 12 13 21 22 23]
ksizes = [1, 3, 3, 1]
strides = [1, 3, 3, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_4:0", shape=(1, 3, 3, 9), dtype=int32)
Tensor("Shape_5:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 11 12 13 21 22 23]
[ 4 5 6 14 15 16 24 25 26]
[ 7 8 9 17 18 19 27 28 29]]
[[31 32 33 41 42 43 51 52 53]
[34 35 36 44 45 46 54 55 56]
[37 38 39 47 48 49 57 58 59]]
[[61 62 63 71 72 73 81 82 83]
[64 65 66 74 75 76 84 85 86]
[67 68 69 77 78 79 87 88 89]]]]
In [8]:
# strides test
# output_depth = ksize_rows * ksize_cols * depth = (3 x 3 x 1) = 9
# same strides as the previous cell; patch top-left values down the rows are 1, 31, 61
ksizes = [1, 3, 3, 1]
strides = [1, 3, 3, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_5:0", shape=(1, 3, 3, 9), dtype=int32)
Tensor("Shape_6:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 11 12 13 21 22 23]
[ 4 5 6 14 15 16 24 25 26]
[ 7 8 9 17 18 19 27 28 29]]
[[31 32 33 41 42 43 51 52 53]
[34 35 36 44 45 46 54 55 56]
[37 38 39 47 48 49 57 58 59]]
[[61 62 63 71 72 73 81 82 83]
[64 65 66 74 75 76 84 85 86]
[67 68 69 77 78 79 87 88 89]]]]
In [9]:
# strides test
# output_depth = ksize_rows * ksize_cols * depth = (3 x 3 x 1) = 9
# stride_rows = 2: patch top-left values down the rows are 1, 21, 41, 61
ksizes = [1, 3, 3, 1]
strides = [1, 2, 3, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_6:0", shape=(1, 4, 3, 9), dtype=int32)
Tensor("Shape_7:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 11 12 13 21 22 23]
[ 4 5 6 14 15 16 24 25 26]
[ 7 8 9 17 18 19 27 28 29]]
[[21 22 23 31 32 33 41 42 43]
[24 25 26 34 35 36 44 45 46]
[27 28 29 37 38 39 47 48 49]]
[[41 42 43 51 52 53 61 62 63]
[44 45 46 54 55 56 64 65 66]
[47 48 49 57 58 59 67 68 69]]
[[61 62 63 71 72 73 81 82 83]
[64 65 66 74 75 76 84 85 86]
[67 68 69 77 78 79 87 88 89]]]]
In [10]:
# strides test
# output_depth = ksize_rows * ksize_cols * depth = (3 x 3 x 1) = 9
# stride_cols = 2: patch top-left values across the columns are 1, 3, 5, 7
ksizes = [1, 3, 3, 1]
strides = [1, 3, 2, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_7:0", shape=(1, 3, 4, 9), dtype=int32)
Tensor("Shape_8:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 11 12 13 21 22 23]
[ 3 4 5 13 14 15 23 24 25]
[ 5 6 7 15 16 17 25 26 27]
[ 7 8 9 17 18 19 27 28 29]]
[[31 32 33 41 42 43 51 52 53]
[33 34 35 43 44 45 53 54 55]
[35 36 37 45 46 47 55 56 57]
[37 38 39 47 48 49 57 58 59]]
[[61 62 63 71 72 73 81 82 83]
[63 64 65 73 74 75 83 84 85]
[65 66 67 75 76 77 85 86 87]
[67 68 69 77 78 79 87 88 89]]]]
In [11]:
# We generate four outputs as follows:
# 1. 3x3 patches with stride length 5
ksizes = [1, 3, 3, 1]
strides = [1, 5, 5, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_8:0", shape=(1, 2, 2, 9), dtype=int32)
Tensor("Shape_9:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 11 12 13 21 22 23]
[ 6 7 8 16 17 18 26 27 28]]
[[51 52 53 61 62 63 71 72 73]
[56 57 58 66 67 68 76 77 78]]]]
In [12]:
# 2. Same as above, but the rate is increased to 2
ksizes = [1, 3, 3, 1]
strides = [1, 5, 5, 1]
rates = [1, 2, 2, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_9:0", shape=(1, 2, 2, 9), dtype=int32)
Tensor("Shape_10:0", shape=(4,), dtype=int32)
[[[[ 1 3 5 21 23 25 41 43 45]
[ 6 8 10 26 28 30 46 48 50]]
[[51 53 55 71 73 75 91 93 95]
[56 58 60 76 78 80 96 98 100]]]]
In [13]:
# 3. 4x4 patches with stride length 7; only one patch should be generated
ksizes = [1, 4, 4, 1]
strides = [1, 7, 7, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='VALID')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_10:0", shape=(1, 1, 1, 16), dtype=int32)
Tensor("Shape_11:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 4 11 12 13 14 21 22 23 24 31 32 33 34]]]]
In [14]:
# 4. Same as above, but with padding set to 'SAME'
ksizes = [1, 4, 4, 1]
strides = [1, 7, 7, 1]
rates = [1, 1, 1, 1]
exi_const1 = tf.extract_image_patches(const1, ksizes, strides, rates, padding='SAME')
print(exi_const1)
print(tf.shape(exi_const1))
tfutil.print_operation_value(exi_const1)
Tensor("ExtractImagePatches_11:0", shape=(1, 2, 2, 16), dtype=int32)
Tensor("Shape_12:0", shape=(4,), dtype=int32)
[[[[ 1 2 3 4 11 12 13 14 21 22 23 24 31 32 33 34]
[ 8 9 10 0 18 19 20 0 28 29 30 0 38 39 40 0]]
[[71 72 73 74 81 82 83 84 91 92 93 94 0 0 0 0]
[78 79 80 0 88 89 90 0 98 99 100 0 0 0 0 0]]]]
tf.space_to_batch(input, paddings, block_size, name=None)
Args:
input: A 4-D Tensor with shape [batch, height, width, depth].
paddings: A 2-D int32 Tensor of shape [2, 2]: [[pad_top, pad_bottom], [pad_left, pad_right]] zero padding applied to the height and width dimensions.
block_size: An int. Blocks of size block_size x block_size in the height and width dimensions are rearranged into the batch dimension at each location, so the batch of the output tensor is batch * block_size * block_size.
name: A name for the operation. (optional)
Returns:
A 4-D Tensor with shape [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth].
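space_to_batch and batch_to_space are inverses when crops equal the paddings; a round-trip sketch, assuming a TF 1.x session:
import numpy as np
import tensorflow as tf
x = tf.constant(np.arange(16).reshape(1, 4, 4, 1), dtype=tf.int32)
y = tf.space_to_batch(x, paddings=[[0, 0], [0, 0]], block_size=2)  # shape (4, 2, 2, 1)
z = tf.batch_to_space(y, crops=[[0, 0], [0, 0]], block_size=2)     # back to (1, 4, 4, 1)
with tf.Session() as sess:
    print(np.array_equal(sess.run(x), sess.run(z)))  # True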
Source: Tensor Transformations
TensorFlow tf.space_to_batch results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[[[1], [2]], [[3], [4]]]]
const1 = tf.constant(np.array(x), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(1, 2, 2, 1), dtype=int32)
Tensor("Shape:0", shape=(4,), dtype=int32)
[[[[1]
[2]]
[[3]
[4]]]]
In [3]:
# returns: [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]
# paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
# height_pad = pad_top + height + pad_bottom
# width_pad = pad_left + width + pad_right
# block_size: both height_pad and width_pad must be divisible by block_size
paddings = [[0, 0], [0, 0]]
blocksize = 1
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND:0", shape=(1, 2, 2, 1), dtype=int32)
Tensor("Shape_1:0", shape=(4,), dtype=int32)
[[[[1]
[2]]
[[3]
[4]]]]
In [4]:
paddings = [[0, 0], [0, 1]]
blocksize = 1
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND_1:0", shape=(1, 2, 3, 1), dtype=int32)
Tensor("Shape_2:0", shape=(4,), dtype=int32)
[[[[1]
[2]
[0]]
[[3]
[4]
[0]]]]
In [5]:
paddings = [[0, 0], [1, 1]]
blocksize = 1
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND_2:0", shape=(1, 2, 4, 1), dtype=int32)
Tensor("Shape_3:0", shape=(4,), dtype=int32)
[[[[0]
[1]
[2]
[0]]
[[0]
[3]
[4]
[0]]]]
In [6]:
paddings = [[0, 1], [1, 1]]
blocksize = 1
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND_3:0", shape=(1, 3, 4, 1), dtype=int32)
Tensor("Shape_4:0", shape=(4,), dtype=int32)
[[[[0]
[1]
[2]
[0]]
[[0]
[3]
[4]
[0]]
[[0]
[0]
[0]
[0]]]]
In [7]:
paddings = [[1, 1], [1, 1]]
blocksize = 1
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND_4:0", shape=(1, 4, 4, 1), dtype=int32)
Tensor("Shape_5:0", shape=(4,), dtype=int32)
[[[[0]
[0]
[0]
[0]]
[[0]
[1]
[2]
[0]]
[[0]
[3]
[4]
[0]]
[[0]
[0]
[0]
[0]]]]
In [8]:
paddings = [[0, 0], [0, 0]]
blocksize = 2
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND_5:0", shape=(4, 1, 1, 1), dtype=int32)
Tensor("Shape_6:0", shape=(4,), dtype=int32)
[[[[1]]]
[[[2]]]
[[[3]]]
[[[4]]]]
In [9]:
# [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]
# block_size: both height_pad and width_pad must be divisible by block_size
paddings = [[0, 0], [1, 1]]
blocksize = 2
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND_6:0", shape=(4, 1, 2, 1), dtype=int32)
Tensor("Shape_7:0", shape=(4,), dtype=int32)
[[[[0]
[2]]]
[[[1]
[0]]]
[[[0]
[4]]]
[[[3]
[0]]]]
In [10]:
# [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]
# block_size: both height_pad and width_pad must be divisible by block_size
paddings = [[1, 1], [1, 1]]
blocksize = 2
stb_const1 = tf.space_to_batch(const1, paddings, blocksize)
print(stb_const1)
print(tf.shape(stb_const1))
tfutil.print_operation_value(stb_const1)
Tensor("SpaceToBatchND_7:0", shape=(4, 2, 2, 1), dtype=int32)
Tensor("Shape_8:0", shape=(4,), dtype=int32)
[[[[0]
[0]]
[[0]
[4]]]
[[[0]
[0]]
[[3]
[0]]]
[[[0]
[2]]
[[0]
[0]]]
[[[1]
[0]]
[[0]
[0]]]]
In [11]:
x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
const2 = tf.constant(np.array(x), dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(1, 2, 2, 3), dtype=int32)
Tensor("Shape_9:0", shape=(4,), dtype=int32)
[[[[ 1 2 3]
[ 4 5 6]]
[[ 7 8 9]
[10 11 12]]]]
In [12]:
paddings = [[0, 0], [0, 0]]
blocksize = 2
stb_const2 = tf.space_to_batch(const2, paddings, blocksize)
print(stb_const2)
print(tf.shape(stb_const2))
tfutil.print_operation_value(stb_const2)
Tensor("SpaceToBatchND_8:0", shape=(4, 1, 1, 3), dtype=int32)
Tensor("Shape_10:0", shape=(4,), dtype=int32)
[[[[ 1 2 3]]]
[[[ 4 5 6]]]
[[[ 7 8 9]]]
[[[10 11 12]]]]
In [13]:
x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
const3 = tf.constant(np.array(x), dtype=tf.int32)
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(1, 4, 4, 1), dtype=int32)
Tensor("Shape_11:0", shape=(4,), dtype=int32)
[[[[ 1]
[ 2]
[ 3]
[ 4]]
[[ 5]
[ 6]
[ 7]
[ 8]]
[[ 9]
[10]
[11]
[12]]
[[13]
[14]
[15]
[16]]]]
In [14]:
paddings = [[0, 0], [0, 0]]
blocksize = 2
stb_const3 = tf.space_to_batch(const3, paddings, blocksize)
print(stb_const3)
print(tf.shape(stb_const3))
tfutil.print_operation_value(stb_const3)
Tensor("SpaceToBatchND_9:0", shape=(4, 2, 2, 1), dtype=int32)
Tensor("Shape_12:0", shape=(4,), dtype=int32)
[[[[ 1]
[ 3]]
[[ 9]
[11]]]
[[[ 2]
[ 4]]
[[10]
[12]]]
[[[ 5]
[ 7]]
[[13]
[15]]]
[[[ 6]
[ 8]]
[[14]
[16]]]]
In [15]:
x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
const4 = tf.constant(np.array(x), dtype=tf.int32)
print(const4)
print(tf.shape(const4))
tfutil.print_constant(const4)
Tensor("Const_3:0", shape=(2, 2, 4, 1), dtype=int32)
Tensor("Shape_13:0", shape=(4,), dtype=int32)
[[[[ 1]
[ 2]
[ 3]
[ 4]]
[[ 5]
[ 6]
[ 7]
[ 8]]]
[[[ 9]
[10]
[11]
[12]]
[[13]
[14]
[15]
[16]]]]
In [16]:
paddings = [[0, 0], [0, 0]]
blocksize = 2
stb_const4 = tf.space_to_batch(const4, paddings, blocksize)
print(stb_const4)
print(tf.shape(stb_const4))
tfutil.print_operation_value(stb_const4)
Tensor("SpaceToBatchND_10:0", shape=(8, 1, 2, 1), dtype=int32)
Tensor("Shape_14:0", shape=(4,), dtype=int32)
[[[[ 1]
[ 3]]]
[[[ 9]
[11]]]
[[[ 2]
[ 4]]]
[[[10]
[12]]]
[[[ 5]
[ 7]]]
[[[13]
[15]]]
[[[ 6]
[ 8]]]
[[[14]
[16]]]]
tf.batch_to_space(input, crops, block_size, name=None)
Args:
input: A 4-D Tensor with shape [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]. The batch size of the input tensor must be a multiple of block_size * block_size.
crops: A 2-D int32 Tensor of shape [2, 2]: [[crop_top, crop_bottom], [crop_left, crop_right]], the number of elements to crop from the height and width dimensions.
block_size: An int. The same block size that was used by space_to_batch.
name: A name for the operation. (optional)
Returns:
A 4-D Tensor with shape [batch, height, width, depth], where height = height_pad - crop_top - crop_bottom and width = width_pad - crop_left - crop_right.
Source: Tensor Transformations
TensorFlow tf.batch_to_space results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
const1 = tf.constant(np.array(x), dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(4, 1, 1, 1), dtype=int32)
Tensor("Shape:0", shape=(4,), dtype=int32)
[[[[1]]]
[[[2]]]
[[[3]]]
[[[4]]]]
In [3]:
# crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
# [batch, height, width, depth]
# height = height_pad - crop_top - crop_bottom
# width = width_pad - crop_left - crop_right
crops = [[0, 0], [0, 1]]
blocksize = 1
bts_const1 = tf.batch_to_space(const1, crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND:0", shape=(4, 1, 0, 1), dtype=int32)
Tensor("Shape_1:0", shape=(4,), dtype=int32)
[]
In [4]:
crops = [[0, 0],[0, 1]]
blocksize = 1
bts_const1 = tf.batch_to_space(const1,
crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND_1:0",
shape=(4, 1, 0, 1), dtype=int32)
Tensor("Shape_2:0", shape=(4,),
dtype=int32)
[]
In [5]:
crops = [[0, 1],[0, 0]]
blocksize = 1
bts_const1 = tf.batch_to_space(const1,
crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND_2:0",
shape=(4, 0, 1, 1), dtype=int32)
Tensor("Shape_3:0", shape=(4,),
dtype=int32)
[]
In [6]:
crops = [[1, 0],[0, 0]]
blocksize = 1
bts_const1 = tf.batch_to_space(const1,
crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND_3:0",
shape=(4, 0, 1, 1), dtype=int32)
Tensor("Shape_4:0", shape=(4,),
dtype=int32)
[]
In [7]:
crops = [[0, 0],[0, 0]]
blocksize = 2
bts_const1 = tf.batch_to_space(const1,
crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND_4:0",
shape=(1, 2, 2, 1), dtype=int32)
Tensor("Shape_5:0", shape=(4,),
dtype=int32)
[[[[1]
[2]]
[[3]
[4]]]]
In [8]:
crops = [[0, 0],[1, 1]]
blocksize = 2
bts_const1 = tf.batch_to_space(const1,
crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND_5:0",
shape=(1, 2, 0, 1), dtype=int32)
Tensor("Shape_6:0", shape=(4,),
dtype=int32)
[]
In [9]:
crops = [[1, 1],[0, 0]]
blocksize = 2
bts_const1 = tf.batch_to_space(const1,
crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND_6:0",
shape=(1, 0, 2, 1), dtype=int32)
Tensor("Shape_7:0", shape=(4,),
dtype=int32)
[]
In [10]:
crops = [[1, 1],[1, 1]]
blocksize = 2
bts_const1 = tf.batch_to_space(const1,
crops, blocksize)
print(bts_const1)
print(tf.shape(bts_const1))
tfutil.print_operation_value(bts_const1)
Tensor("BatchToSpaceND_7:0",
shape=(1, 0, 0, 1), dtype=int32)
Tensor("Shape_8:0", shape=(4,),
dtype=int32)
[]
In [11]:
x = [[[[ 1, 2, 3]]],
[[[ 4, 5, 6]]],
[[[ 7, 8, 9]]],
[[[10, 11, 12]]]]
const2 = tf.constant(np.array(x),
dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(4, 1, 1,
3), dtype=int32)
Tensor("Shape_9:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3]]]
 [[[ 4  5  6]]]
 [[[ 7  8  9]]]
 [[[10 11 12]]]]
In [12]:
crops = [[0, 0],[0, 0]]
blocksize = 2
bts_const2 = tf.batch_to_space(const2,
crops, blocksize)
print(bts_const2)
print(tf.shape(bts_const2))
tfutil.print_operation_value(bts_const2)
Tensor("BatchToSpaceND_8:0",
shape=(1, 2, 2, 3), dtype=int32)
Tensor("Shape_10:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3]
   [ 4  5  6]]
  [[ 7  8  9]
   [10 11 12]]]]
In [13]:
x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
const3 = tf.constant(np.array(x),
dtype=tf.int32)
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(4, 2, 2,
1), dtype=int32)
Tensor("Shape_11:0", shape=(4,),
dtype=int32)
[[[[ 1]
[ 3]]
[[ 9]
[11]]]
[[[ 2]
[ 4]]
[[10]
[12]]]
[[[ 5]
[ 7]]
[[13]
[15]]]
[[[ 6]
[ 8]]
[[14]
[16]]]]
In [14]:
corps = [[0, 0],[0, 0]]
blocksize = 2
bts_const3 = tf.batch_to_space(const3,
corps, blocksize)
print(bts_const3)
print(tf.shape(bts_const3))
tfutil.print_operation_value(bts_const3)
Tensor("BatchToSpaceND_9:0",
shape=(1, 4, 4, 1), dtype=int32)
Tensor("Shape_12:0", shape=(4,),
dtype=int32)
[[[[ 1]
[ 2]
[ 3]
[ 4]]
[[ 5]
[ 6]
[ 7]
[ 8]]
[[ 9]
[10]
[11]
[12]]
[[13]
[14]
[15]
[16]]]]
In [15]:
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
const4 = tf.constant(np.array(x),
dtype=tf.int32)
print(const4)
print(tf.shape(const4))
tfutil.print_constant(const4)
Tensor("Const_3:0", shape=(8, 1, 2,
1), dtype=int32)
Tensor("Shape_13:0", shape=(4,),
dtype=int32)
[[[[ 1]
[ 3]]]
[[[ 9]
[11]]]
[[[ 2]
[ 4]]]
[[[10]
[12]]]
[[[ 5]
[ 7]]]
[[[13]
[15]]]
[[[ 6]
[ 8]]]
[[[14]
[16]]]]
In [16]:
corps = [[0, 0],[0, 0]]
blocksize = 2
bts_const4 = tf.batch_to_space(const4,
corps, blocksize)
print(bts_const4)
print(tf.shape(bts_const4))
tfutil.print_operation_value(bts_const4)
Tensor("BatchToSpaceND_10:0",
shape=(2, 2, 4, 1), dtype=int32)
Tensor("Shape_14:0", shape=(4,),
dtype=int32)
[[[[ 1]
[ 2]
[ 3]
[ 4]]
[[ 5]
[ 6]
[ 7]
[ 8]]]
[[[ 9]
[10]
[11]
[12]]
[[13]
[14]
[15]
[16]]]]
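Since batch_to_space with zero crops exactly inverts space_to_batch with zero paddings, a round trip should reproduce the input. A short sanity check of that, sketched in the notebook's TF 1.x style:
import numpy as np
import tensorflow as tf

x = np.arange(1, 17).reshape(1, 4, 4, 1).astype(np.int32)
stb = tf.space_to_batch(x, [[0, 0], [0, 0]], 2)          # (1,4,4,1) -> (4,2,2,1)
round_trip = tf.batch_to_space(stb, [[0, 0], [0, 0]], 2)  # back to (1,4,4,1)
with tf.Session() as sess:
    print(np.array_equal(sess.run(round_trip), x))  # True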
tf.space_to_depth(input, block_size, name=None)
- Blocks of size block_size x block_size are rearranged into the depth at each location.
- The size of the depth dimension of the output tensor is input_depth * block_size * block_size.
- The height and width of the input tensor must be multiples of block_size.
Arguments:
- input: A Tensor.
- block_size: int. The size of the spatial block.
- name: A name for the operation. (optional)
Returns:
A Tensor with the same type as input.
Source: Tensor Transformations
Tensorflow tf.space_to_depth results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[[[1], [2]],
[[3], [4]]]]
const1 = tf.constant(np.array(x),
dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(1, 2, 2, 1),
dtype=int32)
Tensor("Shape:0", shape=(4,),
dtype=int32)
[[[[1]
[2]]
[[3]
[4]]]]
In [3]:
# input: [batch, height, width, depth]
# output: [batch, height/block_size, width/block_size, depth*block_size*block_size]
blocksize = 2
std_const1 = tf.space_to_depth(const1,
blocksize)
print(std_const1)
print(tf.shape(std_const1))
tfutil.print_operation_value(std_const1)
Tensor("SpaceToDepth:0", shape=(1, 1,
1, 4), dtype=int32)
Tensor("Shape_1:0", shape=(4,),
dtype=int32)
[[[[1 2 3 4]]]]
In [4]:
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
const2 = tf.constant(np.array(x),
dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(1, 2, 2,
3), dtype=int32)
Tensor("Shape_2:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3]
   [ 4  5  6]]
  [[ 7  8  9]
   [10 11 12]]]]
In [5]:
blocksize = 2
std_const2 = tf.space_to_depth(const2,
blocksize)
print(std_const2)
print(tf.shape(std_const2))
tfutil.print_operation_value(std_const2)
Tensor("SpaceToDepth_1:0", shape=(1,
1, 1, 12), dtype=int32)
Tensor("Shape_3:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3  4  5  6  7  8  9 10 11 12]]]]
In [6]:
x = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
const3 = tf.constant(np.array(x),
dtype=tf.int32)
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(1, 4, 4,
1), dtype=int32)
Tensor("Shape_4:0", shape=(4,),
dtype=int32)
[[[[ 1]
[ 2]
[ 5]
[ 6]]
[[ 3]
[ 4]
[ 7]
[ 8]]
[[ 9]
[10]
[13]
[14]]
[[11]
[12]
[15]
[16]]]]
In [7]:
blocksize = 2
std_const3 = tf.space_to_depth(const3,
blocksize)
print(std_const3)
print(tf.shape(std_const3))
tfutil.print_operation_value(std_const3)
Tensor("SpaceToDepth_2:0", shape=(1,
2, 2, 4), dtype=int32)
Tensor("Shape_5:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3  4]
   [ 5  6  7  8]]
  [[ 9 10 11 12]
   [13 14 15 16]]]]
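The rearrangement above amounts to a reshape plus transpose; here is a NumPy sketch of it, under our assumption about TF's block ordering (block rows first, input depth fastest). np_space_to_depth is our own helper name, not a TensorFlow API.
import numpy as np
import tensorflow as tf

def np_space_to_depth(x, bs):
    b, h, w, d = x.shape
    y = x.reshape(b, h // bs, bs, w // bs, bs, d)
    y = y.transpose(0, 1, 3, 2, 4, 5)  # group each bs x bs block together
    return y.reshape(b, h // bs, w // bs, d * bs * bs)

x = np.arange(1, 13).reshape(1, 2, 2, 3).astype(np.int32)
with tf.Session() as sess:
    print(np.array_equal(sess.run(tf.space_to_depth(x, 2)),
                         np_space_to_depth(x, 2)))  # True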
tf.depth_to_space(input, block_size, name=None)
- Chunks of block_size * block_size values from the depth are rearranged into spatial blocks of size block_size x block_size.
- The width of the output tensor is input_width * block_size, and the height is input_height * block_size.
- The depth of the input tensor must be a multiple of block_size * block_size.
Arguments:
- input: A Tensor.
- block_size: int. The size of the spatial block.
- name: A name for the operation. (optional)
Returns:
A Tensor with the same type as input.
Source: Tensor Transformations
Tensorflow tf.depth_to_space results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[[[1, 2, 3, 4]]]]
const1 = tf.constant(np.array(x),
dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(1, 1, 1, 4),
dtype=int32)
Tensor("Shape:0", shape=(4,),
dtype=int32)
[[[[1 2 3 4]]]]
In [3]:
blocksize = 2
dts_const1 = tf.depth_to_space(const1,
blocksize)
print(dts_const1)
print(tf.shape(dts_const1))
tfutil.print_operation_value(dts_const1)
Tensor("DepthToSpace:0", shape=(1, 2,
2, 1), dtype=int32)
Tensor("Shape_1:0", shape=(4,),
dtype=int32)
[[[[1]
[2]]
[[3]
[4]]]]
In [4]:
x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
const2 = tf.constant(np.array(x),
dtype=tf.int32)
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(1, 1, 1,
12), dtype=int32)
Tensor("Shape_2:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3  4  5  6  7  8  9 10 11 12]]]]
In [5]:
blocksize = 2
dts_const2 = tf.depth_to_space(const2,
blocksize)
print(dts_const2)
print(tf.shape(dts_const2))
tfutil.print_operation_value(dts_const2)
Tensor("DepthToSpace_1:0", shape=(1,
2, 2, 3), dtype=int32)
Tensor("Shape_3:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3]
   [ 4  5  6]]
  [[ 7  8  9]
   [10 11 12]]]]
In [6]:
x = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
const3 = tf.constant(np.array(x),
dtype=tf.int32)
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(1, 2, 2,
4), dtype=int32)
Tensor("Shape_4:0", shape=(4,),
dtype=int32)
[[[[ 1  2  3  4]
   [ 5  6  7  8]]
  [[ 9 10 11 12]
   [13 14 15 16]]]]
In [7]:
blocksize = 2
dts_const3 = tf.depth_to_space(const3,
blocksize)
print(dts_const3)
print(tf.shape(dts_const3))
tfutil.print_operation_value(dts_const3)
Tensor("DepthToSpace_2:0", shape=(1,
4, 4, 1), dtype=int32)
Tensor("Shape_5:0", shape=(4,),
dtype=int32)
[[[[ 1]
[ 2]
[ 5]
[ 6]]
[[ 3]
[ 4]
[ 7]
[ 8]]
[[ 9]
[10]
[13]
[14]]
[[11]
[12]
[15]
[16]]]]
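depth_to_space is the inverse of space_to_depth for the same block_size, so composing the two should give back the input; a quick check in the same style (a sketch):
import numpy as np
import tensorflow as tf

x = np.arange(1, 17).reshape(1, 2, 2, 4).astype(np.int32)
rt = tf.space_to_depth(tf.depth_to_space(x, 2), 2)  # round trip
with tf.Session() as sess:
    print(np.array_equal(sess.run(rt), x))  # True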
tf.gather(params, indices, validate_indices=None, name=None)
Arguments:
- params: A Tensor.
- indices: An int32 or int64 Tensor.
- validate_indices: bool. Defaults to True. (optional)
- name: A name for the operation. (optional)
Returns:
A Tensor with the same type as params.
Source: Tensor Transformations
Tensorflow tf.gather results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
const1 = tf.constant(np.array(x),
dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(3, 3),
dtype=int32)
Tensor("Shape:0", shape=(2,),
dtype=int32)
[[1 2 3]
[4 5 6]
[7 8 9]]
In [3]:
idx_const2 = tf.constant([1, 0, 2])
print(idx_const2)
print(tf.shape(idx_const2))
tfutil.print_constant(idx_const2)
Tensor("Const_1:0", shape=(3,),
dtype=int32)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
[1 0 2]
In [4]:
idx_flattened = tf.range(0, const1.shape[0]) * const1.shape[1] + idx_const2
print(idx_flattened)
print(tf.shape(idx_flattened))
tfutil.print_constant(idx_flattened)
Tensor("add:0", shape=(3,),
dtype=int32)
Tensor("Shape_2:0", shape=(1,),
dtype=int32)
[1 3 8]
In [5]:
# partial code 1
print(const1.shape[0])
tmp1 = tf.range(0, const1.shape[0])
print(tmp1)
print(tf.shape(tmp1))
tfutil.print_constant(tmp1)
3
Tensor("range_1:0", shape=(3,),
dtype=int32)
Tensor("Shape_3:0", shape=(1,),
dtype=int32)
[0 1 2]
In [6]:
# partial code 2
print(const1.shape[1])
tmp2 = tf.range(0, const1.shape[0]) * const1.shape[1]
print(tmp2)
print(tf.shape(tmp2))
tfutil.print_constant(tmp2)
3
Tensor("mul_1:0", shape=(3,),
dtype=int32)
Tensor("Shape_4:0", shape=(1,),
dtype=int32)
[0 3 6]
In [7]:
# partial code 3
tmp3 = tmp2 + idx_const2
print(tmp3)
print(tf.shape(tmp3))
tfutil.print_constant(tmp3)
Tensor("add_1:0", shape=(3,),
dtype=int32)
Tensor("Shape_5:0", shape=(1,),
dtype=int32)
[1 3 8]
In [8]:
params = tf.reshape(x,
[-1])
print(params)
print(tf.shape(params))
tfutil.print_constant(params)
Tensor("Reshape:0", shape=(9,),
dtype=int32)
Tensor("Shape_6:0", shape=(1,),
dtype=int32)
[1 2 3 4 5 6 7 8 9]
In [9]:
gather_const1 = tf.gather(params,        # flatten input
                          idx_flattened) # use flattened indices
print(gather_const1)
print(tf.shape(gather_const1))
tfutil.print_constant(gather_const1)
Tensor("Gather:0", shape=(3,),
dtype=int32)
Tensor("Shape_7:0", shape=(1,),
dtype=int32)
[2 4 9]
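The flatten-then-gather trick built up in In [4]–[9] is worth condensing: it picks one column per row of a 2-D tensor. gather_cols_per_row below is a hypothetical helper of ours, not a TensorFlow API, sketched against the TF 1.x API used here:
import tensorflow as tf

def gather_cols_per_row(mat, cols):
    # index [r, cols[r]] for every row r, via the flattened view of mat
    rows = tf.range(0, tf.shape(mat)[0])
    flat_idx = rows * tf.shape(mat)[1] + cols
    return tf.gather(tf.reshape(mat, [-1]), flat_idx)

m = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
picked = gather_cols_per_row(m, tf.constant([1, 0, 2]))
with tf.Session() as sess:
    print(sess.run(picked))  # [2 4 9]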
tf.gather_nd(params, indices, name=None)
Arguments:
- params: An R-D Tensor. The tensor from which to gather values.
- indices: An int32 or int64 (N+1)-D Tensor with shape [d_0, ..., d_N, R].
- name: A name for the operation. (optional)
Returns:
A Tensor with the same type as params.
Source: Tensor Transformations
Tensorflow tf.gather_nd results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
# [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]].
x = [['a', 'b'], ['c', 'd']]
const1 = tf.constant(np.array(x))
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(2, 2),
dtype=string)
Tensor("Shape:0", shape=(2,),
dtype=int32)
[['a' 'b']
 ['c' 'd']]
In [3]:
# Simple indexing into a matrix:
indices = [[0, 0], [1, 1]]
gn_const1 = tf.gather_nd(const1,
indices)
print(gn_const1)
print(tf.shape(gn_const1))
tfutil.print_operation_value(gn_const1)
Tensor("GatherNd:0", shape=(2,),
dtype=string)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
['a' 'd']
In [4]:
# Slice indexing into a matrix:
indices = [[1], [0]]
gn_const1 = tf.gather_nd(const1,
indices)
print(gn_const1)
print(tf.shape(gn_const1))
tfutil.print_operation_value(gn_const1)
Tensor("GatherNd_1:0", shape=(2, 2),
dtype=string)
Tensor("Shape_2:0", shape=(2,),
dtype=int32)
[['c' 'd']
 ['a' 'b']]
In [5]:
# Batched indexing into a matrix:
indices = [[[0, 0]], [[0, 1]]]
gn_const1 = tf.gather_nd(const1,
indices)
print(gn_const1)
print(tf.shape(gn_const1))
tfutil.print_operation_value(gn_const1)
Tensor("GatherNd_2:0", shape=(2, 1),
dtype=string)
Tensor("Shape_3:0", shape=(2,),
dtype=int32)
[['a']
['b']]
In [6]:
# Batched slice indexing into a matrix:
indices = [[[1]], [[0]]]
gn_const1 = tf.gather_nd(const1,
indices)
print(gn_const1)
print(tf.shape(gn_const1))
tfutil.print_operation_value(gn_const1)
Tensor("GatherNd_3:0", shape=(2, 1,
2), dtype=string)
Tensor("Shape_4:0", shape=(3,),
dtype=int32)
[[['c' 'd']]
 [['a' 'b']]]
In [7]:
x = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
const2 = tf.constant(np.array(x))
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(2, 2, 2),
dtype=string)
Tensor("Shape_5:0", shape=(3,),
dtype=int32)
[[['a0' 'b0']
  ['c0' 'd0']]
 [['a1' 'b1']
  ['c1' 'd1']]]
In [8]:
# Indexing into a 3-tensor:
indices = [[1]]
gn_const2 = tf.gather_nd(const2,
indices)
print(gn_const2)
print(tf.shape(gn_const2))
tfutil.print_operation_value(gn_const2)
Tensor("GatherNd_4:0", shape=(1, 2,
2), dtype=string)
Tensor("Shape_6:0", shape=(3,),
dtype=int32)
[[['a1' 'b1']
  ['c1' 'd1']]]
In [9]:
indices = [[0, 1], [1, 0]]
gn_const2 = tf.gather_nd(const2,
indices)
print(gn_const2)
print(tf.shape(gn_const2))
tfutil.print_operation_value(gn_const2)
Tensor("GatherNd_5:0", shape=(2,
2), dtype=string)
Tensor("Shape_7:0", shape=(2,),
dtype=int32)
[['c0' 'd0']
 ['a1' 'b1']]
In [10]:
indices = [[0, 0, 1], [1, 0, 1]]
gn_const2 = tf.gather_nd(const2,
indices)
print(gn_const2)
print(tf.shape(gn_const2))
tfutil.print_operation_value(gn_const2)
Tensor("GatherNd_6:0", shape=(2,),
dtype=string)
Tensor("Shape_8:0", shape=(1,),
dtype=int32)
['b0' 'b1']
In [11]:
# Batched indexing into a 3-tensor:
indices = [[[1]], [[0]]]
gn_const2 = tf.gather_nd(const2,
indices)
print(gn_const2)
print(tf.shape(gn_const2))
tfutil.print_operation_value(gn_const2)
Tensor("GatherNd_7:0", shape=(2, 1,
2, 2), dtype=string)
Tensor("Shape_9:0", shape=(4,),
dtype=int32)
[[[['a1' 'b1']
   ['c1' 'd1']]]
 [[['a0' 'b0']
   ['c0' 'd0']]]]
In [12]:
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
gn_const2 = tf.gather_nd(const2,
indices)
print(gn_const2)
print(tf.shape(gn_const2))
tfutil.print_operation_value(gn_const2)
Tensor("GatherNd_8:0", shape=(2, 2,
2), dtype=string)
Tensor("Shape_10:0", shape=(3,),
dtype=int32)
[[['c0' 'd0']
  ['a1' 'b1']]
 [['a0' 'b0']
  ['c1' 'd1']]]
In [13]:
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
gn_const2 = tf.gather_nd(const2,
indices)
print(gn_const2)
print(tf.shape(gn_const2))
tfutil.print_operation_value(gn_const2)
Tensor("GatherNd_9:0", shape=(2, 2),
dtype=string)
Tensor("Shape_11:0", shape=(2,),
dtype=int32)
[['b0' 'b1']
 ['d0' 'c1']]
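All of the cases above follow one shape rule: output.shape == indices.shape[:-1] + params.shape[indices.shape[-1]:]. A quick check of that rule, as we read it from these examples:
import numpy as np
import tensorflow as tf

params = tf.constant(np.zeros((2, 2, 2)))
indices = tf.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]])  # shape (2, 2, 2)
out = tf.gather_nd(params, indices)
# indices.shape[:-1] = (2, 2); params.shape[2:] = (2,) -> output (2, 2, 2)
print(out.shape)  # (2, 2, 2)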
tf.dynamic_partition(data, partitions, num_partitions, name=None)
Example:
# Scalar partitions
partitions = 1
num_partitions = 2
data = [10, 20]
outputs[0] = []  # Empty with shape [0, 2]
outputs[1] = [[10, 20]]
# Vector partitions
partitions = [0, 0, 1, 1, 0]
num_partitions = 2
data = [10, 20, 30, 40, 50]
outputs[0] = [10, 20, 50]
outputs[1] = [30, 40]
Arguments:
- data: A Tensor.
- partitions: An int32 Tensor of any shape, containing indices in the range [0, num_partitions).
- num_partitions: int (>= 1). The number of partitions.
- name: A name for the operation. (optional)
Returns:
A list of num_partitions Tensors with the same type as data.
Source: Tensor Transformations
Tensorflow tf.dynamic_partition results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
# outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
# outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
x = [10, 20]
const1 = tf.constant(np.array(x))
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(2,),
dtype=int64)
Tensor("Shape:0", shape=(1,),
dtype=int32)
[10 20]
In [3]:
# Scalar partitions.
partitions = 1
num_partitions = 2
dyp_const1 = tf.dynamic_partition(const1,
partitions, num_partitions)
print(dyp_const1[0])
print(tf.shape(dyp_const1[0]))
tfutil.print_operation_value(dyp_const1[0])
Tensor("DynamicPartition:0",
shape=(?, 2), dtype=int64)
Tensor("Shape_1:0", shape=(2,),
dtype=int32)
[]
In [4]:
print(dyp_const1[1])
print(tf.shape(dyp_const1[1]))
tfutil.print_operation_value(dyp_const1[1])
Tensor("DynamicPartition:1",
shape=(?, 2), dtype=int64)
Tensor("Shape_2:0", shape=(2,),
dtype=int32)
[[10 20]]
In [5]:
print(dyp_const1)
print(tf.shape(dyp_const1))
tfutil.print_operation_value(dyp_const1)
[<tf.Tensor 'DynamicPartition:0' shape=(?,
2) dtype=int64>, <tf.Tensor 'DynamicPartition:1' shape=(?, 2)
dtype=int64>]
Tensor("Shape_3:0", shape=(3,),
dtype=int32)
[array([], shape=(0, 2), dtype=int64),
array([[10, 20]])]
In [6]:
x = [10, 20, 30, 40, 50]
const2 = tf.constant(np.array(x))
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(5,),
dtype=int64)
Tensor("Shape_4:0", shape=(1,),
dtype=int32)
[10 20 30 40 50]
In [7]:
# Vector partitions.
partitions = [0, 0, 1, 1, 0]
num_partitions = 2
dyp_const2 = tf.dynamic_partition(const2,
partitions, num_partitions)
print(dyp_const2[0])
print(tf.shape(dyp_const2[0]))
tfutil.print_operation_value(dyp_const2[0])
Tensor("DynamicPartition_1:0",
shape=(?,), dtype=int64)
Tensor("Shape_5:0", shape=(1,),
dtype=int32)
[10 20 50]
In [8]:
print(dyp_const2[1])
print(tf.shape(dyp_const2[1]))
tfutil.print_operation_value(dyp_const2[1])
Tensor("DynamicPartition_1:1",
shape=(?,), dtype=int64)
Tensor("Shape_6:0", shape=(1,),
dtype=int32)
[30 40]
In [9]:
print(dyp_const2)
print(tf.shape(dyp_const2))
tfutil.print_operation_value(dyp_const2)
[<tf.Tensor 'DynamicPartition_1:0'
shape=(?,) dtype=int64>, <tf.Tensor 'DynamicPartition_1:1' shape=(?,)
dtype=int64>]
Tensor("Shape_7:0", shape=(2,),
dtype=int32)
[array([10, 20, 50]), array([30, 40])]
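A typical use is splitting by a computed label rather than a hand-written partition vector, for example even versus odd (a sketch in the notebook's TF 1.x style):
import tensorflow as tf

x = tf.constant([10, 21, 32, 43, 54])
labels = tf.cast(x % 2, tf.int32)  # 0 for even, 1 for odd
evens, odds = tf.dynamic_partition(x, labels, 2)
with tf.Session() as sess:
    print(sess.run(evens))  # [10 32 54]
    print(sess.run(odds))   # [21 43]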
tf.dynamic_stitch(indices, data, name=None)
Example:
indices[0] = 6
indices[1] = [4, 1]
indices[2] = [[5, 2], [0, 3]]
data[0] = [61, 62]
data[1] = [[41, 42], [11, 12]]
data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], [51, 52], [61, 62]]
Arguments:
- indices: A list of at least 2 int32 Tensors.
- data: A list of Tensors with the same type, containing the same number of tensors as indices.
- name: A name for the operation. (optional)
Returns:
A Tensor with the same type as the tensors in data.
Source: Tensor Transformations
Tensorflow tf.dynamic_stitch results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [[1, 2], [3, 4]]
const1 = tf.constant(np.array(x))
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(2, 2),
dtype=int64)
Tensor("Shape:0", shape=(2,),
dtype=int32)
[[1 2]
[3 4]]
In [3]:
y = [1, 1]
row_to_add = tf.constant(np.array(y))
print(row_to_add)
print(tf.shape(row_to_add))
tfutil.print_constant(row_to_add)
Tensor("Const_1:0", shape=(2,),
dtype=int64)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
[1 1]
In [4]:
original_row = const1[0]
print(original_row)
print(tf.shape(original_row))
tfutil.print_constant(original_row)
Tensor("strided_slice:0", shape=(2,),
dtype=int64)
Tensor("Shape_2:0", shape=(1,),
dtype=int32)
[1 2]
In [5]:
updated_row = original_row + row_to_add
print(updated_row)
print(tf.shape(updated_row))
tfutil.print_operation_value(updated_row)
Tensor("add:0", shape=(2,),
dtype=int64)
Tensor("Shape_3:0", shape=(1,),
dtype=int32)
[2 3]
In [6]:
unchanged_indices = tf.range(tf.size(const1))
print(unchanged_indices)
print(tf.shape(unchanged_indices))
tfutil.print_operation_value(unchanged_indices)
Tensor("range:0", shape=(4,),
dtype=int32)
Tensor("Shape_4:0", shape=(1,),
dtype=int32)
[0 1 2 3]
In [7]:
changed_indices = tf.range(const1.get_shape()[0])
print(changed_indices)
print(tf.shape(changed_indices))
tfutil.print_operation_value(changed_indices)
Tensor("range_1:0", shape=(2,),
dtype=int32)
Tensor("Shape_5:0", shape=(1,),
dtype=int32)
[0 1]
In [8]:
a_flat = tf.reshape(const1,
[-1])
print(a_flat)
print(tf.shape(a_flat))
tfutil.print_operation_value(a_flat)
Tensor("Reshape:0", shape=(4,),
dtype=int64)
Tensor("Shape_6:0", shape=(1,),
dtype=int32)
[1 2 3 4]
In [9]:
updated_a_flat = tf.dynamic_stitch([unchanged_indices,
changed_indices], [a_flat, updated_row])
print(updated_a_flat)
print(tf.shape(updated_a_flat))
tfutil.print_operation_value(updated_a_flat)
Tensor("DynamicStitch:0", shape=(4,),
dtype=int64)
Tensor("Shape_7:0", shape=(1,),
dtype=int32)
[2 3 3 4]
In [10]:
updated_a = tf.reshape(updated_a_flat,
const1.get_shape())
print(updated_a)
print(tf.shape(updated_a))
tfutil.print_operation_value(updated_a)
Tensor("Reshape_1:0", shape=(2, 2),
dtype=int64)
Tensor("Shape_8:0", shape=(2,),
dtype=int32)
[[2 3]
[3 4]]
In [11]:
# Apply a function (increment x_i) to elements for which a certain condition
# applies (x_i != -1 in this example).
x = [0.1, -1., 5.2, 4.3, -1., 7.4]
const1 = tf.constant(np.array(x))
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const_2:0", shape=(6,),
dtype=float64)
Tensor("Shape_9:0", shape=(1,),
dtype=int32)
[ 0.1 -1.   5.2  4.3 -1.   7.4]
In [12]:
condition_mask = tf.not_equal(x, tf.constant(-1.))
print(condition_mask)
print(tf.shape(condition_mask))
tfutil.print_constant(condition_mask)
Tensor("NotEqual:0", shape=(6,),
dtype=bool)
Tensor("Shape_10:0", shape=(1,),
dtype=int32)
[ True False  True  True False  True]
In [13]:
partitioned_data = tf.dynamic_partition(
    x, tf.cast(condition_mask, tf.int32), 2)
print(partitioned_data)
print(tf.shape(partitioned_data))
[<tf.Tensor 'DynamicPartition:0' shape=(?,)
dtype=float32>, <tf.Tensor 'DynamicPartition:1' shape=(?,)
dtype=float32>]
Tensor("Shape_11:0", shape=(2,),
dtype=int32)
In [14]:
partitioned_data[1] = partitioned_data[1] + 1.0
print(partitioned_data[1])
print(tf.shape(partitioned_data[1]))
tfutil.print_constant(partitioned_data[1])
Tensor("add_1:0", shape=(?,),
dtype=float32)
Tensor("Shape_12:0", shape=(1,),
dtype=int32)
[ 1.10000002  6.19999981  5.30000019  8.39999962]
In [15]:
condition_indices = tf.dynamic_partition(
    tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
print(condition_indices)
print(tf.shape(condition_indices))
[<tf.Tensor 'DynamicPartition_1:0'
shape=(?,) dtype=int32>, <tf.Tensor 'DynamicPartition_1:1' shape=(?,)
dtype=int32>]
Tensor("Shape_14:0", shape=(2,),
dtype=int32)
In [16]:
ds_const1 = tf.dynamic_stitch(condition_indices,
partitioned_data)
print(ds_const1)
print(tf.shape(ds_const1))
tfutil.print_constant(ds_const1)
Tensor("DynamicStitch_1:0",
shape=(?,), dtype=float32)
Tensor("Shape_15:0", shape=(1,),
dtype=int32)
[ 1.10000002 -1. 6.19999981 5.30000019 -1. 8.39999962]
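The partition → transform → stitch pattern of In [11]–[16] is common enough to condense into one helper; apply_where below is our own hypothetical name, sketched against the same TF 1.x API:
import tensorflow as tf

def apply_where(x, mask, fn):
    # split by mask, transform only the masked part, stitch back in order
    part = tf.cast(mask, tf.int32)
    keep, change = tf.dynamic_partition(x, part, 2)
    idx = tf.dynamic_partition(tf.range(tf.shape(x)[0]), part, 2)
    return tf.dynamic_stitch(idx, [keep, fn(change)])

x = tf.constant([0.1, -1.0, 5.2, 4.3, -1.0, 7.4])
y = apply_where(x, tf.not_equal(x, -1.0), lambda t: t + 1.0)
with tf.Session() as sess:
    print(sess.run(y))  # approx. [ 1.1 -1.   6.2  5.3 -1.   8.4]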
tf.boolean_mask(tensor, mask, name='boolean_mask')
Arguments:
- tensor: An N-D Tensor.
- mask: A K-D boolean Tensor, with K <= N, where K must be known statically.
- name: A name for the operation. (optional)
Returns:
A tensor populated by the entries of tensor corresponding to True values in mask.
Raises:
- ValueError: If the shapes do not match.
Example:
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = [True, False, True]
boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]
Source: Tensor Transformations
Tensorflow tf.boolean_mask results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [0, 1, 2, 3]
const1 = tf.constant(np.array(x))
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(4,),
dtype=int64)
Tensor("Shape:0", shape=(1,),
dtype=int32)
[0 1 2 3]
In [3]:
mask = np.array([True, False, True, False])
bm_const1 = tf.boolean_mask(const1,
mask)
print(bm_const1)
print(tf.shape(bm_const1))
tfutil.print_operation_value(bm_const1)
Tensor("boolean_mask/Gather:0",
shape=(?,), dtype=int64)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
[0 2]
In [4]:
x = [[1, 2], [3, 4], [5, 6]]
const2 = tf.constant(np.array(x))
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(3, 2),
dtype=int64)
Tensor("Shape_2:0", shape=(2,),
dtype=int32)
[[1 2]
[3 4]
[5 6]]
In [5]:
mask = np.array([True, False, True])
bm_const2 = tf.boolean_mask(const2,
mask)
print(bm_const2)
print(tf.shape(bm_const2))
tfutil.print_operation_value(bm_const2)
Tensor("boolean_mask_1/Gather:0",
shape=(?, 2), dtype=int64)
Tensor("Shape_3:0", shape=(2,),
dtype=int32)
[[1 2]
[5 6]]
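boolean_mask mirrors NumPy's boolean indexing, which makes a one-line cross-check possible (a sketch in the notebook's TF 1.x style):
import numpy as np
import tensorflow as tf

arr = np.array([[1, 2], [3, 4], [5, 6]])
mask = np.array([True, False, True])
with tf.Session() as sess:
    tf_out = sess.run(tf.boolean_mask(arr, mask))
print(np.array_equal(tf_out, arr[mask]))  # True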
tf.one_hot(indices, depth, on_value=None, off_value=None, axis=None, dtype=None, name=None)
Arguments:
- indices: A Tensor of indices.
- depth: A scalar defining the depth of the one-hot dimension.
- on_value: A scalar value to use where indices[j] = i. (default: 1, optional)
- off_value: A scalar value to use where indices[j] != i. (default: 0, optional)
- axis: The axis to fill. (default: -1, optional)
- dtype: The data type of the output tensor.
- name: A name for the operation. (optional)
Returns:
The one-hot tensor.
Raises:
- TypeError: If the dtype of on_value or off_value differs from dtype.
- TypeError: If the dtypes of on_value and off_value differ from each other.
Source: Tensor Transformations
Tensorflow tf.one_hot results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [0, 1, 2]
const1 = tf.constant(np.array(x))
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(3,),
dtype=int64)
Tensor("Shape:0", shape=(1,),
dtype=int32)
[0 1 2]
In [3]:
depth = 3
oh_const1 = tf.one_hot(const1,
depth)
print(oh_const1)
print(tf.shape(oh_const1))
tfutil.print_operation_value(oh_const1)
Tensor("one_hot:0", shape=(3, 3),
dtype=float32)
Tensor("Shape_1:0", shape=(2,),
dtype=int32)
[[ 1.  0.  0.]
 [ 0.  1.  0.]
 [ 0.  0.  1.]]
In [4]:
x = [0, 2, -1, 1]
const2 = tf.constant(np.array(x))
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(4,), dtype=int64)
Tensor("Shape_2:0", shape=(1,),
dtype=int32)
[ 0  2 -1  1]
In [5]:
depth = 3
bm_const2 = tf.one_hot(const2,
depth,
on_value=5.0, off_value=0.0, axis=-1)
print(bm_const2)
print(tf.shape(bm_const2))
tfutil.print_operation_value(bm_const2)
Tensor("one_hot_1:0", shape=(4, 3),
dtype=float32)
Tensor("Shape_3:0", shape=(2,),
dtype=int32)
[[ 5.  0.  0.]
 [ 0.  0.  5.]
 [ 0.  0.  0.]
 [ 0.  5.  0.]]
In [6]:
x = [[0, 2], [1, -1]]
const3 = tf.constant(np.array(x))
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(2, 2),
dtype=int64)
Tensor("Shape_4:0", shape=(2,),
dtype=int32)
[[ 0  2]
 [ 1 -1]]
In [7]:
depth = 3
bm_const3 = tf.one_hot(const3,
depth,
on_value=1.0, off_value=0.0, axis=-1)
print(bm_const3)
print(tf.shape(bm_const3))
tfutil.print_operation_value(bm_const3)
Tensor("one_hot_2:0", shape=(2, 2,
3), dtype=float32)
Tensor("Shape_5:0", shape=(3,),
dtype=int32)
[[[ 1.  0.  0.]
  [ 0.  0.  1.]]
 [[ 0.  1.  0.]
  [ 0.  0.  0.]]]
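Note how the out-of-range index -1 in In [5] and In [7] yields an all-off_value row. When integer output is wanted directly, dtype can set it without a separate cast (a minimal sketch):
import tensorflow as tf

labels = tf.constant([0, 1, 2])
onehot = tf.one_hot(labels, depth=3, dtype=tf.int32)  # int32 instead of the default float32
with tf.Session() as sess:
    print(sess.run(onehot))
# [[1 0 0]
#  [0 1 0]
#  [0 0 1]]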
Other Functions and Classes
tf.bitcast(input, type, name=None)
Arguments:
- input: A Tensor. Must be one of the following types: float32, float64, int64, int32, uint8, uint16, int16, int8, complex64, complex128, qint8, quint8, qint32, half.
- type: A tf.DType. Must be one of: tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.int16, tf.int8, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint32, tf.half.
- name: A name for the operation. (optional)
Returns:
A Tensor of type type.
Source: Tensor Transformations
Tensorflow tf.bitcast results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = 37.0
const1 = tf.constant(x)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(),
dtype=float32)
Tensor("Shape:0", shape=(0,),
dtype=int32)
37.0
In [3]:
bc_const1 = tf.bitcast(const1,
tf.int32)
print(bc_const1)
print(tf.shape(bc_const1))
tfutil.print_operation_value(bc_const1)
Tensor("Bitcast:0", shape=(),
dtype=int32)
Tensor("Shape_1:0", shape=(0,),
dtype=int32)
1108606976
In [4]:
x = -1
invert_bits = tf.constant(x) - bc_const1
print(invert_bits)
print(tf.shape(invert_bits))
tfutil.print_operation_value(invert_bits)
Tensor("sub:0", shape=(),
dtype=int32)
Tensor("Shape_2:0", shape=(0,),
dtype=int32)
-1108606977
In [5]:
bc_to_float = tf.bitcast(invert_bits,
tf.float32)
print(bc_to_float)
print(tf.shape(bc_to_float))
tfutil.print_operation_value(bc_to_float)
Tensor("Bitcast_1:0", shape=(),
dtype=float32)
Tensor("Shape_3:0", shape=(0,),
dtype=int32)
-0.115234
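The same reinterpretation can be reproduced in NumPy with .view, which gives a convenient cross-check for the numbers above (a sketch, not a TensorFlow API):
import numpy as np

f = np.array([37.0], dtype=np.float32)
bits = f.view(np.int32)[0]
print(bits)                                 # 1108606976, as in In [3]
inverted = np.array([-1 - int(bits)], dtype=np.int32)  # flips every bit
print(inverted[0])                          # -1108606977
print(inverted.view(np.float32)[0])         # approx. -0.115234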
tf.shape_n(input, name=None)
Arguments:
- input: A list of 1 or more Tensors with the same type.
- name: A name for the operation. (optional)
Returns:
A list of int32 Tensors, one per input tensor.
Source: Tensor Transformations
Tensorflow tf.shape_n results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [1]
const1 = tf.constant(np.array(x))
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(1,),
dtype=int64)
Tensor("Shape:0", shape=(1,),
dtype=int32)
[1]
In [3]:
sn_const1 = tf.shape_n([const1])
print(sn_const1)
tfutil.print_operation_value(sn_const1)
[<tf.Tensor 'ShapeN:0' shape=(1,)
dtype=int32>]
[array([1], dtype=int32)]
In [4]:
x = [1, 2]
const2 = tf.constant(np.array(x))
print(const2)
print(tf.shape(const2))
tfutil.print_constant(const2)
Tensor("Const_1:0", shape=(2,),
dtype=int64)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
[1 2]
In [5]:
sn_const2 = tf.shape_n([const2])
print(sn_const2)
tfutil.print_operation_value(sn_const2)
[<tf.Tensor 'ShapeN_1:0' shape=(1,)
dtype=int32>]
[array([2], dtype=int32)]
In [6]:
x = [[1, 2], [3, 4]]
const3 = tf.constant(np.array(x))
print(const3)
print(tf.shape(const3))
tfutil.print_constant(const3)
Tensor("Const_2:0", shape=(2, 2),
dtype=int64)
Tensor("Shape_2:0", shape=(2,),
dtype=int32)
[[1 2]
[3 4]]
In [7]:
sn_const3 = tf.shape_n([const3])
print(sn_const3)
tfutil.print_operation_value(sn_const3)
[<tf.Tensor 'ShapeN_2:0' shape=(2,)
dtype=int32>]
[array([2, 2], dtype=int32)]
In [8]:
x = [[1, 2], [3, 4], [5, 6]]
const4 = tf.constant(np.array(x))
print(const4)
print(tf.shape(const4))
tfutil.print_constant(const4)
Tensor("Const_3:0", shape=(3, 2),
dtype=int64)
Tensor("Shape_3:0", shape=(2,),
dtype=int32)
[[1 2]
[3 4]
[5 6]]
In [9]:
sn_const4 = tf.shape_n([const4])
print(sn_const4)
tfutil.print_operation_value(sn_const4)
[<tf.Tensor 'ShapeN_3:0' shape=(2,)
dtype=int32>]
[array([3, 2], dtype=int32)]
In [10]:
x = [[[1], [2]], [[3], [4]]]
const5 = tf.constant(np.array(x))
print(const5)
print(tf.shape(const5))
tfutil.print_constant(const5)
Tensor("Const_4:0", shape=(2, 2, 1),
dtype=int64)
Tensor("Shape_4:0", shape=(3,),
dtype=int32)
[[[1]
[2]]
[[3]
[4]]]
In [11]:
sn_const5 = tf.shape_n([const5])
print(sn_const5)
tfutil.print_operation_value(sn_const5)
[<tf.Tensor 'ShapeN_4:0' shape=(3,)
dtype=int32>]
[array([2, 2, 1], dtype=int32)]
In [12]:
x = [[[1], [2]], [[3], [4]], [[5], [6]]]
const6 = tf.constant(np.array(x))
print(const6)
print(tf.shape(const6))
tfutil.print_constant(const6)
Tensor("Const_5:0", shape=(3, 2, 1),
dtype=int64)
Tensor("Shape_5:0", shape=(3,),
dtype=int32)
[[[1]
[2]]
[[3]
[4]]
[[5]
[6]]]
In [13]:
sn_const6 = tf.shape_n([const6])
print(sn_const6)
tfutil.print_operation_value(sn_const6)
[<tf.Tensor 'ShapeN_5:0' shape=(3,)
dtype=int32>]
[array([3, 2, 1], dtype=int32)]
In [14]:
sn_const6 = tf.shape_n([[const6]])
print(sn_const6)
tfutil.print_operation_value(sn_const6)
[<tf.Tensor 'ShapeN_6:0' shape=(4,)
dtype=int32>]
[array([1, 3, 2, 1], dtype=int32)]
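Every cell above passes a one-element list, but the point of shape_n is returning the shapes of several tensors in a single op; a minimal sketch:
import numpy as np
import tensorflow as tf

a = tf.constant(np.zeros((2, 3)))
b = tf.constant(np.zeros((4,)))
c = tf.constant(np.zeros((1, 2, 3)))
with tf.Session() as sess:
    print(sess.run(tf.shape_n([a, b, c])))
# [array([2, 3], dtype=int32), array([4], dtype=int32),
#  array([1, 2, 3], dtype=int32)]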
tf.unique_with_counts(x, name=None)
Example:
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx, count = unique_with_counts(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
Arguments:
- x: A 1-D Tensor.
- name: A name for the operation. (optional)
Returns:
- y: A 1-D Tensor with the same type as x.
- idx: A 1-D int32 Tensor.
- count: A 1-D int32 Tensor.
Source: Tensor Transformations
Tensorflow tf.unique_with_counts results
In [1]:
import tensorflow as tf
import numpy as np
import tfutil
In [2]:
x = [1, 1, 2, 4, 4, 4, 7, 8, 8]
const1 = tf.constant(np.array(x),
dtype=tf.int32)
print(const1)
print(tf.shape(const1))
tfutil.print_constant(const1)
Tensor("Const:0", shape=(9,),
dtype=int32)
Tensor("Shape:0", shape=(1,),
dtype=int32)
[1 1 2 4 4 4 7 8 8]
In [3]:
y, idx, count = tf.unique_with_counts(const1)
print(y)
print(tf.shape(y))
tfutil.print_constant(y)
Tensor("UniqueWithCounts:0",
shape=(?,), dtype=int32)
Tensor("Shape_1:0", shape=(1,),
dtype=int32)
[1 2 4 7 8]
In [4]:
print(idx)
print(tf.shape(idx))
tfutil.print_constant(idx)
Tensor("UniqueWithCounts:1",
shape=(9,), dtype=int32)
Tensor("Shape_2:0", shape=(1,),
dtype=int32)
[0 0 1 2 2 2 3 4 4]
In [5]:
print(count)
print(tf.shape(count))
tfutil.print_constant(count)
Tensor("UniqueWithCounts:2",
shape=(?,), dtype=int32)
Tensor("Shape_3:0", shape=(1,),
dtype=int32)
[2 1 3 1 2]
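idx maps each element of x back to its position in y, so the original tensor can always be rebuilt with tf.gather; a quick check (a sketch in the same TF 1.x style):
import tensorflow as tf

x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
y, idx, count = tf.unique_with_counts(x)
reconstructed = tf.gather(y, idx)  # y[idx[i]] == x[i] for every i
with tf.Session() as sess:
    print(sess.run(tf.reduce_all(tf.equal(reconstructed, x))))  # True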
This post is a study note on the Python API from the TensorFlow API documentation, written as preparatory study for development with TensorFlow, the open-source machine learning/deep learning framework.