
Commit 4948b5c

Chris Dyer authored and committed
fix Eigen index type errors that strict compilers/some versions of Eigen cause
1 parent b066cec · commit 4948b5c

13 files changed: 131 additions, 133 deletions
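Every hunk below makes the same change: Eigen's tensor operations (broadcast, sum, reshape) take index arrays, and Eigen addresses tensors with Eigen::DenseIndex, which is ptrdiff_t, not int. Passing Eigen::array<int, N> compiles on permissive setups but fails or warns under strict compilers and some Eigen versions. A minimal sketch of the failure mode and the fix, assuming only Eigen's unsupported Tensor module (the shape and sizes here are invented for illustration):

  #include <cstddef>
  #include <unsupported/Eigen/CXX11/Tensor>

  int main() {
    Eigen::Tensor<float, 2> x(3, 1);
    x.setConstant(1.f);
    // Before: Eigen::array<int, 2> bcast = {1, 4};  // can fail under strict builds
    // After: use the index type Eigen itself uses (ptrdiff_t).
    Eigen::array<ptrdiff_t, 2> bcast = {1, 4};
    Eigen::Tensor<float, 2> y = x.broadcast(bcast);  // 3 x 4
    return y.dimension(1) == 4 ? 0 : 1;
  }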

dynet/nodes-affinetransform.cc
Lines changed: 3 additions & 3 deletions

@@ -87,7 +87,7 @@ void AffineTransform::forward_dev_impl(const MyDevice & dev, const vector<const
     tvec(fx).device(*dev.edevice) = tvec(*xs[0]);
   } else {
 #ifdef __CUDACC__
-    Eigen::array<int, 3> bcast; bcast[0] = 1; bcast[1] = fx.d[1]/xs[0]->d[1]; bcast[2] = fx.d.bd/xs[0]->d.bd;
+    Eigen::array<ptrdiff_t, 3> bcast = {1, fx.d[1]/xs[0]->d[1], fx.d.bd/xs[0]->d.bd};
     tb<2>(fx).device(*dev.edevice) = tb<2>(*xs[0]).broadcast(bcast);
 #else
     DYNET_ARG_CHECK(xs[0]->d.bd == 1, "In AffineTransform, broadcasting over columns with mini-batched inputs is not implemented yet");
@@ -137,10 +137,10 @@ void AffineTransform::backward_dev_impl(const MyDevice & dev,
     DYNET_ARG_CHECK(dEdxi.d.bd == 1, "In AffineTransform, broadcasting over columns with mini-batched inputs is not implemented yet");
 #ifdef __CUDACC__
     if(dEdxi.d[1] == dEdf.d[1]) {
-      Eigen::array<int, 1> red_axis; red_axis[0] = 2;
+      Eigen::array<ptrdiff_t, 1> red_axis = { 2 };
       t<2>(dEdxi).device(*dev.edevice) += tb<2>(dEdf).sum(red_axis);
     } else {
-      Eigen::array<int, 2> red_axis; red_axis[0] = 1; red_axis[1] = 2;
+      Eigen::array<ptrdiff_t, 2> red_axis = {1, 2};
       t<1>(dEdxi).device(*dev.edevice) += tb<2>(dEdf).sum(red_axis);
     }
 #else

dynet/nodes-arith-cwise.cc
Lines changed: 16 additions & 16 deletions

@@ -59,7 +59,7 @@ void CwiseSum::forward_dev_impl(const MyDevice & dev, const vector<const Tensor*
     } else {
       int greater = xs[0]->d.bd > xs[1]->d.bd ? 0 : 1;
 #ifdef __CUDACC__
-      Eigen::array<int, 2> bcast = {1,(int)xs[greater]->d.bd};
+      Eigen::array<ptrdiff_t, 2> bcast = {1, xs[greater]->d.bd};
       tbvec(fx).device(*dev.edevice) = tbvec(*xs[1-greater]).broadcast(bcast) + tbvec(*xs[greater]);
 #else
       for(size_t b = 0; b < fx.d.bd; ++b)
@@ -68,7 +68,7 @@ void CwiseSum::forward_dev_impl(const MyDevice & dev, const vector<const Tensor*
     }
   // Broadcasting over dims as well
   } else {
-    Eigen::array<int, 5> bcast_left = {1,1,1,1,1}, bcast_right = {1,1,1,1,1};
+    Eigen::array<ptrdiff_t, 5> bcast_left = {1,1,1,1,1}, bcast_right = {1,1,1,1,1};
     bool has_left = false, has_right = false;
     for(; i < fx.d.nd; ++i){
       if(xs[0]->d[i] > xs[1]->d[i]) {
@@ -114,7 +114,7 @@ void CwiseSum::backward_dev_impl(const MyDevice & dev,
     tvec(dEdxi).device(*dev.edevice) += tvec(dEdf);
   } else {
 #ifdef __CUDACC__
-    Eigen::array<int, 1> red_axis = {1};
+    Eigen::array<ptrdiff_t, 1> red_axis = {1};
     tvec(dEdxi).device(*dev.edevice) += tbvec(dEdf).sum(red_axis);
 #else
     for(size_t b = 0; b < dEdf.d.bd; ++b)
@@ -140,10 +140,10 @@ void CwiseSum::backward_helper(const MyDevice & dev,
                                const Tensor& dEdf,
                                unsigned i,
                                Tensor& dEdxi) const {
-  Eigen::array<int, ReductionOrder> red_axis;
+  Eigen::array<ptrdiff_t, ReductionOrder> red_axis;
   if(ReductionOrder>0) red_axis[ReductionOrder-1] = 4;
   int curr_red_axis = 0;
-  Eigen::array<int, 5> morph = {1,1,1,1,(int)xs[i]->d.bd};
+  Eigen::array<ptrdiff_t, 5> morph = {1,1,1,1,(int)xs[i]->d.bd};
   for(unsigned int di = 0; di < fx.d.nd; di++) {
     if((di >= xs[i]->d.nd && fx.d[di]>1) || xs[i]->d[di] != fx.d[di]) {
       red_axis[curr_red_axis] = di;
@@ -205,7 +205,7 @@ void CwiseMultiply::forward_dev_impl(const MyDevice & dev, const vector<const Te
     } else {
       int greater = xs[0]->d.bd > xs[1]->d.bd ? 0 : 1;
 #ifdef __CUDACC__
-      Eigen::array<int, 2> bcast = {1,(int)xs[greater]->d.bd};
+      Eigen::array<ptrdiff_t, 2> bcast = {1,(int)xs[greater]->d.bd};
       tbvec(fx).device(*dev.edevice) = tbvec(*xs[1-greater]).broadcast(bcast) * tbvec(*xs[greater]);
 #else
       for(size_t b = 0; b < fx.d.bd; ++b)
@@ -214,7 +214,7 @@ void CwiseMultiply::forward_dev_impl(const MyDevice & dev, const vector<const Te
     }
   // Broadcasting over dims as well
   } else {
-    Eigen::array<int, 5> bcast_left = {1,1,1,1,1}, bcast_right = {1,1,1,1,1};
+    Eigen::array<ptrdiff_t, 5> bcast_left = {1,1,1,1,1}, bcast_right = {1,1,1,1,1};
     bool has_left = false, has_right = false;
     for(; i < fx.d.nd; ++i){
       if(xs[0]->d[i] > xs[1]->d[i]) {
@@ -263,10 +263,10 @@ void CwiseMultiply::backward_dev_impl(const MyDevice & dev,
     tvec(dEdxi).device(*dev.edevice) += tvec(dEdf) * tvec(*xs[1-i]);
   } else if(xs[1-i]->d.bd == 1) {
     // TODO: Make alternative code path for CPU?
-    Eigen::array<int, 2> bcast; bcast[0] = 1; bcast[1] = fx.d.bd;
+    Eigen::array<ptrdiff_t, 2> bcast = { 1, fx.d.bd };
     tbvec(dEdxi).device(*dev.edevice) += tbvec(dEdf) * tbvec(*xs[1-i]).broadcast(bcast);
   } else {
-    Eigen::array<int, 1> red_axis; red_axis[0] = 1;
+    Eigen::array<ptrdiff_t, 1> red_axis = {1};
     tvec(dEdxi).device(*dev.edevice) += (tbvec(dEdf) * tbvec(*xs[1-i])).sum(red_axis);
   }
   // Otherwise work with broadcasting, etc.
@@ -288,8 +288,8 @@ void CwiseMultiply::backward_helper(const MyDevice & dev,
                                     const Tensor& dEdf,
                                     unsigned i,
                                     Tensor& dEdxi) const {
-  Eigen::array<int, ReductionOrder> red_axis;
-  Eigen::array<int, 5> morph = {1,1,1,1,(int)xs[i]->d.bd}, bcast_other = {1,1,1,1,1};
+  Eigen::array<ptrdiff_t, ReductionOrder> red_axis;
+  Eigen::array<ptrdiff_t, 5> morph = {1,1,1,1,(int)xs[i]->d.bd}, bcast_other = {1,1,1,1,1};
   if(ReductionOrder>0) red_axis[ReductionOrder-1] = 4;
   int curr_red_axis = 0;
   for(unsigned int di = 0; di < fx.d.nd; di++){
@@ -341,7 +341,7 @@ void CwiseQuotient::forward_dev_impl(const MyDevice & dev, const vector<const Te
   if(xs[0]->d.size() == xs[1]->d.size()){
     tb<4>(fx).device(*dev.edevice) = tb<4>(*xs[0]) / tb<4>(*xs[1]);
   } else {
-    Eigen::array<int, 5> bcast = {1,1,1,1,1};
+    Eigen::array<ptrdiff_t, 5> bcast = {1,1,1,1,1};
     for(unsigned int di = 0; di<xs[0]->d.nd; di++){
       if(xs[1]->d[di]==1) bcast[di] = xs[0]->d[di];
     }
@@ -362,7 +362,7 @@ void CwiseQuotient::backward_dev_impl(const MyDevice & dev,
   if(xs[0]->d.size() == xs[1]->d.size()){
     tb<4>(dEdxi).device(*dev.edevice) += tb<4>(dEdf) / tb<4>(*xs[1]);
   } else {
-    Eigen::array<int, 5> bcast = {1,1,1,1,1};
+    Eigen::array<ptrdiff_t, 5> bcast = {1,1,1,1,1};
     for(unsigned int di = 0; di<xs[0]->d.nd; di++){
       if(xs[0]->d[di]!=xs[1]->d[di]) bcast[di] = xs[0]->d[di];
     }
@@ -393,7 +393,7 @@ void CwiseQuotient::backward_helper(const MyDevice & dev,
                                     const Tensor& dEdf,
                                     unsigned i,
                                     Tensor& dEdxi) const {
-  Eigen::array<int, ReductionOrder> red_axis;
+  Eigen::array<ptrdiff_t, ReductionOrder> red_axis;
   if(ReductionOrder>0) red_axis[ReductionOrder-1] = 4;
   int curr_red_axis = 0;
   for(unsigned int di = 0; di < xs[0]->d.nd; di++){
@@ -402,12 +402,12 @@ void CwiseQuotient::backward_helper(const MyDevice & dev,
       curr_red_axis++;
     }
   }
-  Eigen::array<int, 5> morph = {1,1,1,1,1};
+  Eigen::array<ptrdiff_t, 5> morph = {1,1,1,1,1};
   for(unsigned int di = 0; di < xs[0]->d.nd; di++){
     morph[di] = xs[i]->d[di];
   }
   morph[4] = xs[i]->d.bd;
-  Eigen::array<int, 5> bcast = {1,1,1,1,1};
+  Eigen::array<ptrdiff_t, 5> bcast = {1,1,1,1,1};
   for(unsigned int di = 0; di < xs[0]->d.nd; di++){
     if(xs[0]->d[di]!=xs[1]->d[di]) bcast[di] = xs[0]->d[di];
   }
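The backward helpers above all follow one recipe: sum the incoming gradient over the broadcasted axes, reshape (morph) the result back to the input's layout, and broadcast where the shapes still differ; every one of those index arrays is now ptrdiff_t. A small sketch of the reshape-then-broadcast step, with hypothetical sizes:

  #include <cstddef>
  #include <unsupported/Eigen/CXX11/Tensor>

  int main() {
    Eigen::Tensor<float, 1> g(3);               // gradient already reduced
    g.setConstant(0.5f);
    Eigen::array<ptrdiff_t, 2> morph = {3, 1};  // reinsert the summed axis
    Eigen::array<ptrdiff_t, 2> bcast = {1, 4};  // replicate along it
    Eigen::Tensor<float, 2> dEdx = g.reshape(morph).broadcast(bcast);  // 3 x 4
    return dEdx.dimension(1) == 4 ? 0 : 1;
  }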

dynet/nodes-arith-sum.cc
Lines changed: 24 additions & 24 deletions

@@ -94,7 +94,7 @@ void Sum::forward_dev_impl(const MyDevice & dev, const vector<const Tensor*>& xs
     // Not all the same batch size, so need to broadcast in the cases where they differ
     TensorTools::zero(fx);
 #ifdef __CUDACC__
-    Eigen::array<int, 2> bcast({ 1, (int)fx.d.bd });
+    Eigen::array<ptrdiff_t, 2> bcast({ 1, fx.d.bd });
 #endif
     for (unsigned i = 0; i < num_args; ++i) {
       if (xs[i]->d.bd == fx.d.bd) {
@@ -123,7 +123,7 @@ void Sum::backward_dev_impl(const MyDevice & dev,
   if(dEdxi.d.bd == fx.d.bd) {
     tvec(dEdxi).device(*dev.edevice) += tvec(dEdf);
   } else {
-    Eigen::array<int, 1> red_axis = {1};
+    Eigen::array<ptrdiff_t, 1> red_axis = {1};
     tvec(dEdxi).device(*dev.edevice) += tbvec(dEdf).sum(red_axis);
   }
 }
@@ -149,7 +149,7 @@ Dim SumElements::dim_forward(const vector<Dim>& xs) const {
 template<class MyDevice>
 void SumElements::forward_dev_impl(const MyDevice & dev, const vector<const Tensor*>& xs, Tensor& fx) const {
   DYNET_ARG_CHECK(xs.size() == 1, "Failed dimension check in SumElements::forward");
-  Eigen::array<int, 1> red_axis; red_axis[0] = 0;
+  Eigen::array<ptrdiff_t, 1> red_axis = {0};
   tb<0>(fx).device(*dev.edevice) = tbvec(*xs[0]).sum(red_axis);
 }

@@ -161,7 +161,7 @@ void SumElements::backward_dev_impl(const MyDevice & dev,
                                     unsigned i,
                                     Tensor& dEdxi) const {
   DYNET_ARG_CHECK(i == 0, "Failed dimension check in SumElements::backward");
-  Eigen::array<int, 2> bcast = {(int)xs[0]->d.batch_size(), 1};
+  Eigen::array<ptrdiff_t, 2> bcast = {xs[0]->d.batch_size(), 1};
   tbvec(dEdxi).device(*dev.edevice) += tbvec(dEdf).broadcast(bcast);
 }
 DYNET_NODE_INST_DEV_IMPL(SumElements)
@@ -199,19 +199,19 @@ void SumDimension::forward_dev_impl(const MyDevice & dev, const vector<const Ten
   DYNET_ASSERT(xs.size() == 1, "Failed input count check in SumDimension");

   if(dims.size()==0 && include_batch_dim){
-    Eigen::array<int, 1> reduction_axis = {1};
+    Eigen::array<ptrdiff_t, 1> reduction_axis = {1};
     tvec(fx).device(*dev.edevice) = tbvec(*xs[0]).sum(reduction_axis);
   } else if(dims.size()==1 && !include_batch_dim){
-    Eigen::array<int, 1> reduction_axis = {(int)dims[0]};
+    Eigen::array<ptrdiff_t, 1> reduction_axis = {dims[0]};
     tb<2>(fx).device(*dev.edevice) = tb<3>(*xs[0]).sum(reduction_axis);
   } else if(dims.size()==1 && include_batch_dim){
-    Eigen::array<int, 2> reduction_axis = {(int)dims[0], 3};
+    Eigen::array<ptrdiff_t, 2> reduction_axis = {dims[0], 3};
     t<2>(fx).device(*dev.edevice) = tb<3>(*xs[0]).sum(reduction_axis);
   } else if(dims.size()==2 && !include_batch_dim){
-    Eigen::array<int, 2> reduction_axis = {(int)dims[0], (int)dims[1]};
+    Eigen::array<ptrdiff_t, 2> reduction_axis = {dims[0], dims[1]};
     tb<1>(fx).device(*dev.edevice) = tb<3>(*xs[0]).sum(reduction_axis);
   } else if(dims.size()==2 && include_batch_dim){
-    Eigen::array<int, 3> reduction_axis = {(int)dims[0], (int)dims[1], 3};
+    Eigen::array<ptrdiff_t, 3> reduction_axis = {dims[0], dims[1], 3};
     t<1>(fx).device(*dev.edevice) = tb<3>(*xs[0]).sum(reduction_axis);
   }
 }
@@ -226,23 +226,23 @@ void SumDimension::backward_dev_impl(const MyDevice & dev,
   DYNET_ARG_CHECK(i == 0, "Failed dimension check in SumDimension::backward");

   if(dims.size()==0 && include_batch_dim){
-    Eigen::array<int, 2> bcast = {1, (int)xs[0]->d.bd};
+    Eigen::array<ptrdiff_t, 2> bcast = {1, xs[0]->d.bd};
     tbvec(dEdxi).device(*dev.edevice) += tbvec(dEdf).broadcast(bcast);
   } else if(dims.size()==1 && !include_batch_dim){
-    Eigen::array<int, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]];
-    Eigen::array<int, 4> morph = {(int)xs[0]->d[0],(int)xs[0]->d[1],(int)xs[0]->d[2],(int)xs[0]->d.bd}; morph[dims[0]] = 1;
+    Eigen::array<ptrdiff_t, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]];
+    Eigen::array<ptrdiff_t, 4> morph = {xs[0]->d[0],xs[0]->d[1],xs[0]->d[2],xs[0]->d.bd}; morph[dims[0]] = 1;
     tb<3>(dEdxi).device(*dev.edevice) += tb<2>(dEdf).reshape(morph).broadcast(bcast);
   } else if(dims.size()==1 && include_batch_dim){
-    Eigen::array<int, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]]; bcast[3] = xs[0]->d.bd;
-    Eigen::array<int, 4> morph = {(int)xs[0]->d[0],(int)xs[0]->d[1],(int)xs[0]->d[2],(int)1}; morph[dims[0]] = 1;
+    Eigen::array<ptrdiff_t, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]]; bcast[3] = xs[0]->d.bd;
+    Eigen::array<ptrdiff_t, 4> morph = {xs[0]->d[0],xs[0]->d[1],xs[0]->d[2],1}; morph[dims[0]] = 1;
     tb<3>(dEdxi).device(*dev.edevice) += t<2>(dEdf).reshape(morph).broadcast(bcast);
   } else if(dims.size()==2 && !include_batch_dim){
-    Eigen::array<int, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]]; bcast[dims[1]] = xs[0]->d[dims[1]];
-    Eigen::array<int, 4> morph = {(int)xs[0]->d[0],(int)xs[0]->d[1],(int)xs[0]->d[2],(int)xs[0]->d.bd}; morph[dims[0]] = 1; morph[dims[1]] = 1;
+    Eigen::array<ptrdiff_t, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]]; bcast[dims[1]] = xs[0]->d[dims[1]];
+    Eigen::array<ptrdiff_t, 4> morph = {xs[0]->d[0],xs[0]->d[1],xs[0]->d[2],xs[0]->d.bd}; morph[dims[0]] = 1; morph[dims[1]] = 1;
     tb<3>(dEdxi).device(*dev.edevice) += tb<1>(dEdf).reshape(morph).broadcast(bcast);
   } else if(dims.size()==2 && include_batch_dim){
-    Eigen::array<int, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]]; bcast[dims[1]] = xs[0]->d[dims[1]]; bcast[3] = xs[0]->d.bd;
-    Eigen::array<int, 4> morph = {(int)xs[0]->d[0],(int)xs[0]->d[1],(int)xs[0]->d[2],(int)1}; morph[dims[0]] = 1; morph[dims[1]] = 1;
+    Eigen::array<ptrdiff_t, 4> bcast = {1,1,1,1}; bcast[dims[0]] = xs[0]->d[dims[0]]; bcast[dims[1]] = xs[0]->d[dims[1]]; bcast[3] = xs[0]->d.bd;
+    Eigen::array<ptrdiff_t, 4> morph = {xs[0]->d[0],xs[0]->d[1],xs[0]->d[2],1}; morph[dims[0]] = 1; morph[dims[1]] = 1;
     tb<3>(dEdxi).device(*dev.edevice) += t<1>(dEdf).reshape(morph).broadcast(bcast);
   }
 }
@@ -274,13 +274,13 @@ void AddVectorToAllColumns::forward_dev_impl(const MyDevice & dev, const vector<
   // Broadcasting is slow on CPU, so split codepaths
 #ifdef __CUDACC__
   if(xs[0]->d.bd >= xs[1]->d.bd) {
-    Eigen::array<int, 3> bcasts = {1, (int)xs[0]->d[1], (int)(xs[0]->d.bd/xs[1]->d.bd)};
+    Eigen::array<ptrdiff_t, 3> bcasts = {1, xs[0]->d[1], xs[0]->d.bd/xs[1]->d.bd};
     tb<2>(fx).device(*dev.edevice) = tb<2>(*xs[0]) + tb<2>(*xs[1]).broadcast(bcasts);
   } else {
     DYNET_ASSERT(xs[0]->d.bd == 1,
                  "Bad dimensions in AddVectorToAllColumns::forward: " << xs[0]->d << ", " << xs[1]->d);
-    Eigen::array<int, 3> bcasts0 = {1, 1, (int)xs[1]->d.bd};
-    Eigen::array<int, 3> bcasts1 = {1, (int)xs[0]->d[1], 1};
+    Eigen::array<ptrdiff_t, 3> bcasts0 = {1, 1, xs[1]->d.bd};
+    Eigen::array<ptrdiff_t, 3> bcasts1 = {1, xs[0]->d[1], 1};
     tb<2>(fx).device(*dev.edevice) = tb<2>(*xs[0]).broadcast(bcasts0) + tb<2>(*xs[1]).broadcast(bcasts1);
   }
 #else
@@ -315,17 +315,17 @@ void AddVectorToAllColumns::backward_dev_impl(const MyDevice & dev,
     if(dEdf.d.bd == dEdxi.d.bd) {
       tvec(dEdxi).device(*dev.edevice) += tvec(dEdf);
     } else {
-      Eigen::array<int, 1> red_axis = {2};
+      Eigen::array<ptrdiff_t, 1> red_axis = {2};
       t<2>(dEdxi).device(*dev.edevice) += tb<2>(dEdf).sum(red_axis);
     }
   } else { // bias
     if(dEdf.d.bd == dEdxi.d.bd) {
-      Eigen::array<int, 1> red_axis = {1};
+      Eigen::array<ptrdiff_t, 1> red_axis = {1};
       tb<1>(dEdxi).device(*dev.edevice) += tb<2>(dEdf).sum(red_axis);
     } else {
       DYNET_ASSERT(dEdxi.d.bd == 1,
                    "Bad dimensions in AddVectorToAllColumns::backward: " << xs[0]->d << ", " << xs[1]->d);
-      Eigen::array<int, 2> red_axis = {1,2};
+      Eigen::array<ptrdiff_t, 2> red_axis = {1,2};
       t<1>(dEdxi).device(*dev.edevice) += tb<2>(dEdf).sum(red_axis);
     }
   }
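SumDimension's forward pass above reduces over one or two axes, optionally adding the batch axis (index 3) to the list; the axis list is an index array like all the others. A sketch of a two-axis reduction under the same assumptions (sizes invented for illustration):

  #include <cstddef>
  #include <unsupported/Eigen/CXX11/Tensor>

  int main() {
    Eigen::Tensor<float, 4> t(2, 3, 4, 5);         // treat axis 3 as the batch
    t.setConstant(1.f);
    Eigen::array<ptrdiff_t, 2> red_axis = {1, 3};  // like {dims[0], 3} above
    Eigen::Tensor<float, 2> s = t.sum(red_axis);   // 2 x 4; each entry is 3*5 = 15
    return s(0, 0) == 15.f ? 0 : 1;
  }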

dynet/nodes-concat.cc
Lines changed: 1 addition & 1 deletion

@@ -84,7 +84,7 @@ void Concatenate::backward_dev_impl(const MyDevice & dev,
   if(dEdxi.d.bd == dEdf.d.bd) {
     tb<4>(dEdxi).device(*dev.edevice) += tb<4>(dEdf).slice(indices, sizes);
   } else {
-    Eigen::array<int, 1> red_axis; red_axis[0] = 4;
+    Eigen::array<ptrdiff_t, 1> red_axis = {4};
     t<4>(dEdxi).device(*dev.edevice) += tb<4>(dEdf).slice(indices, sizes).sum(red_axis);
   }
 }

dynet/nodes-contract.cc
Lines changed: 6 additions & 6 deletions

@@ -63,7 +63,7 @@ void InnerProduct3D_1D::forward_dev_impl(const MyDevice & dev, const vector<cons
   // Handle hypothetical bias
   if (xs.size() == 3) {
     auto C = tb<2>(*xs[2]);
-    Eigen::array<int, 3> bcast_C = {1, 1, (int)(xs[2]->d.bd == 1 ? fx.d.bd : 1)};
+    Eigen::array<ptrdiff_t, 3> bcast_C = {1, 1, (xs[2]->d.bd == 1 ? fx.d.bd : 1)};
     tb<2>(fx).device(*dev.edevice) = C.broadcast(bcast_C);
   }
 #if defined(__CUDACC__) && !defined(DYNET_SKIP_CUDA_CONTRACTIONS)
@@ -81,7 +81,7 @@ void InnerProduct3D_1D::forward_dev_impl(const MyDevice & dev, const vector<cons
   // TODO : maybe on CPU broadcast is not as affective as looping?
   if (xs[0]->d.bd == 1) {
     // A is a 3 tensor
-    Eigen::array<int, 2> bcast_b = {1, (int)(xs[1]->d.bd == 1 ? fx.d.bd : 1)};
+    Eigen::array<ptrdiff_t, 2> bcast_b = {1, (xs[1]->d.bd == 1 ? fx.d.bd : 1)};
     auto b = tb<1>(*xs[1]);
     auto A = t<3>(*xs[0]);
     tb<2>(fx).device(*dev.edevice) += A.contract(b.broadcast(bcast_b), dims);
@@ -149,14 +149,14 @@ void InnerProduct3D_1D::backward_dev_impl(const MyDevice & dev,
     if (xs[0]->d.bd == 1) { // A is a 3 tensor
       // tensor product
       auto b = tb<1>(*xs[1]);
-      Eigen::array<int, 2> bcast_b = {1, (int)(xs[1]->d.bd == 1 ? fx.d.bd : 1)};
+      Eigen::array<ptrdiff_t, 2> bcast_b = {1, (xs[1]->d.bd == 1 ? fx.d.bd : 1)};
       Eigen::array<DimPair, 1> dims({{DimPair(2, 1)}});
       t<3>(dEdxi).device(*dev.edevice) += tdEdf.contract(b.broadcast(bcast_b), dims);
     } else {
       // For now if A is batched the CUDA version is not implemented
       if (xs[1]->d.bd == 1) {
         // auto b = t<1>(*xs[1]);
-        // Eigen::array<int, 4> morph {dEdf.d[0], dEdf.d[1], xs[1]->d[0], dEdf.d.bd};
+        // Eigen::array<ptrdiff_t, 4> morph {dEdf.d[0], dEdf.d[1], xs[1]->d[0], dEdf.d.bd};
         // tb<3>(dEdxi).device(*dev.edevice) += tdEdf.contract(b, Eigen::array<DimPair, 0> {{}}).reshape(morph);
         auto b = t<1>(*xs[1]);
         for (unsigned i = 0; i < fx.d.bd; ++i) {
@@ -211,7 +211,7 @@ void InnerProduct3D_1D::backward_dev_impl(const MyDevice & dev,
     if (xs[1]->d.bd == 1) { // b is a 1 tensor
       if (xs[0]->d.bd == 1) {
         auto A = t<3>(*xs[0]); // A is 3 tensor
-        Eigen::array<int, 1> red_axis; red_axis[0] = 0;
+        Eigen::array<ptrdiff_t, 1> red_axis = {0};
         Eigen::array<DimPair, 2> dims({{DimPair(0, 0), DimPair(1, 1)}});
         t<1>(dEdxi).device(*dev.edevice) += tdEdf.contract(A, dims).sum(red_axis);
       } else {
@@ -235,7 +235,7 @@ void InnerProduct3D_1D::backward_dev_impl(const MyDevice & dev,
 #endif
   } else if (i == 2) { // dEdC
     if (xs[2]->d.bd == 1) {
-      Eigen::array<int, 1> red_axis; red_axis[0] = 2;
+      Eigen::array<ptrdiff_t, 1> red_axis = {2};
       t<2>(dEdxi).device(*dev.edevice) += tdEdf.sum(red_axis);
     } else {
       tb<2>(dEdxi).device(*dev.edevice) += tdEdf;

dynet/nodes-conv2d.cc
Lines changed: 1 addition & 1 deletion

@@ -214,7 +214,7 @@ void Conv2D::backward_dev_impl(const MyDevice & dev,
     t<4>(HWCN_dEdxi).device(*dev.edevice) = t<4>(NCHW_dEdxi).shuffle(shuffles);
     t<4>(dEdxi).device(*dev.edevice) += t<4>(HWCN_dEdxi);
   } else { //backward w.r.t the bias
-    Eigen::array<int, 3> red_axis = {0, 1, 3};
+    Eigen::array<ptrdiff_t, 3> red_axis = {0, 1, 3};
    t<1>(dEdxi).device(*dev.edevice) += tb<3>(dEdf).sum(red_axis);
   }
 #endif
