Skip to content
This repository was archived by the owner on Jul 1, 2025. It is now read-only.

Commit 28bac25

Browse files
[cleanup] Remove depth parameter from ConvolutionNode.
1 parent 35a22a8 commit 28bac25

File tree

19 files changed

+44
-54
lines changed

19 files changed

+44
-54
lines changed

include/glow/Graph/Graph.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -164,8 +164,8 @@ class Function final : public Named {
164164

165165
ConvolutionNode *createConv(llvm::StringRef name, NodeValue input,
166166
NodeValue filter, NodeValue bias, TypeRef outTy,
167-
size_t depth, size_t kernel, size_t stride,
168-
size_t pad, size_t group);
167+
size_t kernel, size_t stride, size_t pad,
168+
size_t group);
169169

170170
PoolMaxNode *createPoolMax(llvm::StringRef name, NodeValue input,
171171
size_t kernel, size_t stride, size_t pad);

lib/Backends/CPU/Transforms.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ using llvm::isa;
1818
/// pre-swizzle the data in the weights to make the access pattern more
1919
/// efficient.
2020
static Node *optimizeCPUConv(ConvolutionNode *CN, Function *F) {
21-
auto depth = CN->getDepth();
21+
auto depth = CN->getFilter().dims()[0];
2222
auto *M = F->getParent();
2323

2424
// The depth dimension must be a multiple of 64 to perform the
@@ -62,7 +62,7 @@ static Node *optimizeCPUConv(ConvolutionNode *CN, Function *F) {
6262

6363
return F->addNode(new CPUConvDKKC8Node(
6464
CN->getName(), CN->getType(), CN->getInput(), filter8, CN->getBias(),
65-
CN->getKernel(), CN->getStride(), CN->getPad(), CN->getDepth()));
65+
CN->getKernel(), CN->getStride(), CN->getPad()));
6666
}
6767

6868
bool CPUBackend::transformPostLowering(Function *F) {

lib/Backends/OpenCL/OpenCL.cpp

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -431,11 +431,9 @@ void OCLBackend::doForwardPass() {
431431
setKernelArg(kernel, 9, idim);
432432
setKernelArg(kernel, 10, ShapeNHWC(CC->getFilter()->getType()->dims()));
433433

434-
auto depth = CC->getDepth();
435-
436434
// Use a 3D grid where the first dimension is the depth and the second
437435
// dimension is the slice index in the batch.
438-
enqueueKernel(commands_, kernel, deviceId_, {odim.h, odim.w, depth},
436+
enqueueKernel(commands_, kernel, deviceId_, {odim.h, odim.w, odim.c},
439437
kernelLaunches);
440438
continue;
441439
}

lib/Graph/Graph.cpp

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -372,38 +372,37 @@ ConvolutionNode *Function::createConv(llvm::StringRef name, NodeValue input,
372372
auto OT = getParent()->uniqueType(ElemKind::FloatTy, outDims);
373373

374374
return addNode(new ConvolutionNode(name, OT, input, filter, bias, kernel,
375-
stride, pad, depth, /*group = */ 1));
375+
stride, pad, /*group = */ 1));
376376
}
377377

378378
/// Check that the dimensions that are passed in when the convolution is
379379
/// constructed are correct.
380380
static void assertConvDims(NodeValue input, NodeValue filter, NodeValue bias,
381-
size_t depth, size_t kernel, size_t stride,
382-
size_t pad, size_t group) {
381+
size_t kernel, size_t stride, size_t pad,
382+
size_t group) {
383383
ShapeNHWC idim = ShapeNHWC(input.dims());
384384
assert(idim.w >= kernel && idim.h >= kernel &&
385385
"buffer too small for selected stride");
386386
assert(idim.c % group == 0 && "channels number must be divisible by groups");
387387
(void)idim;
388388

389389
auto filterDims = filter->dims();
390-
assert(filterDims[0] == depth * group && filterDims[1] == kernel &&
390+
assert(filterDims[0] % group == 0 && filterDims[1] == kernel &&
391391
filterDims[2] == kernel && filterDims[3] == idim.c / group &&
392392
"Invalid filter dims");
393393
(void)filterDims;
394394

395-
assert(bias->getType()->size() == depth * group && "Invalid bias size");
395+
assert(bias->getType()->size() == filterDims[0] && "Invalid bias size");
396396
}
397397

398398
ConvolutionNode *Function::createConv(llvm::StringRef name, NodeValue input,
399399
NodeValue filter, NodeValue bias,
400-
TypeRef outTy, size_t depth,
401-
size_t kernel, size_t stride, size_t pad,
402-
size_t group) {
403-
assertConvDims(input, filter, bias, depth, kernel, stride, pad, group);
400+
TypeRef outTy, size_t kernel,
401+
size_t stride, size_t pad, size_t group) {
402+
assertConvDims(input, filter, bias, kernel, stride, pad, group);
404403
auto OT = getParent()->uniqueType(*outTy);
405404
return addNode(new ConvolutionNode(name, OT, input, filter, bias, kernel,
406-
stride, pad, depth, group));
405+
stride, pad, group));
407406
}
408407

409408
PoolMaxNode *Function::createPoolMax(llvm::StringRef name, NodeValue input,

lib/Graph/Nodes.cpp

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -488,7 +488,7 @@ static void checkType(NodeValue A, ElemKind expectedType) {
488488

489489
static void verifyConvolution(NodeValue src, NodeValue dest, NodeValue filter,
490490
NodeValue bias, size_t kernel, size_t stride,
491-
size_t pad, size_t depth, size_t group) {
491+
size_t pad, size_t group) {
492492
assert(src.getElementType() == dest.getElementType() && "Invalid Type");
493493
assert(src.getElementType() == filter.getElementType() && "Invalid Type");
494494
assert(src.getElementType() == bias.getElementType() && "Invalid Type");
@@ -501,15 +501,14 @@ static void verifyConvolution(NodeValue src, NodeValue dest, NodeValue filter,
501501
assert(idim.c % group == 0 && "channels number must be divisible by groups");
502502

503503
auto outSz = calculateConvOutputDims(idim.h, idim.w, kernel, stride, pad);
504-
ShapeNHWC exp(idim.n, outSz.first, outSz.second, depth * group);
505-
(void)exp;
506-
assert(exp == odim && "Invalid output dimensions");
504+
assert(odim.n == idim.n && odim.h == outSz.first && odim.w == outSz.second &&
505+
odim.c % group == 0 && "Invalid output dimensions");
507506

508-
auto filterDims = {depth * group, kernel, kernel, idim.c / group};
507+
auto filterDims = {odim.c, kernel, kernel, idim.c / group};
509508
assert(filter.getType()->dims().equals(filterDims) && "Invalid filter dims");
510509
(void)filterDims;
511510

512-
auto biasDims = {depth * group};
511+
auto biasDims = {odim.c};
513512
assert(bias.getType()->dims().equals(biasDims) && "Invalid bias dims");
514513
(void)biasDims;
515514
}
@@ -596,14 +595,14 @@ static void verifyRegression(NodeValue src, NodeValue dest,
596595

597596
void ConvolutionNode::verify() const {
598597
verifyConvolution(getInput(), getResult(), getFilter(), getBias(), Kernel_,
599-
Stride_, Pad_, Depth_, Group_);
598+
Stride_, Pad_, Group_);
600599
}
601600

602601
void ConvolutionGradNode::verify() const {
603602
verifyConvolution(getGradOfInputNamedInput(),
604603
getGradOfOriginalOutputNamedResult(),
605604
getGradOfInputNamedFilter(), getGradOfInputNamedBias(),
606-
Kernel_, Stride_, Pad_, Depth_, Group_);
605+
Kernel_, Stride_, Pad_, Group_);
607606
}
608607

609608
void PoolMaxNode::verify() const {

lib/IR/IRGen.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -115,10 +115,9 @@ struct IRGenVisitor : NodeWalker {
115115
auto *filterG = builder_.createAllocActivationInst("conv.filter.G",
116116
filter->getType());
117117

118-
builder_.createConvolutionGradInst(N->getName(), input, filter, outGrad,
119-
inG, filterG, biasG, CG->getKernel(),
120-
CG->getStride(), CG->getPad(),
121-
CG->getDepth(), CG->getGroup());
118+
builder_.createConvolutionGradInst(
119+
N->getName(), input, filter, outGrad, inG, filterG, biasG,
120+
CG->getKernel(), CG->getStride(), CG->getPad(), CG->getGroup());
122121

123122
registerIR(CG->getGradOfInputNamedInput(), inG);
124123
registerIR(CG->getGradOfInputNamedFilter(), filterG);

lib/Importer/Caffe2.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -218,8 +218,8 @@ void caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) {
218218
{idim.n, outSz.first, outSz.second, depth}};
219219
auto outTy = G_.getParent()->uniqueType(ElemKind::FloatTy, outDims);
220220

221-
auto *node = G_.createConv(opName, tr, filter, bias, outTy, depth / group,
222-
kernel, stride, pad, group);
221+
auto *node = G_.createConv(opName, tr, filter, bias, outTy, kernel, stride,
222+
pad, group);
223223

224224
// Transpose the output back.
225225
auto *N = G_.createTranspose(opName, node, NHWC2NCHW);

lib/Optimizer/GraphOptimizer.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -762,10 +762,10 @@ static void optimizeQuantization(Function *F) {
762762
if (auto *CN = dyn_cast<ConvolutionNode>(RS->getInput())) {
763763
// Create the exact same convolution but with a different scaling
764764
// return type.
765-
auto *newCN = F->createConv(
766-
CN->getName(), CN->getInput(), CN->getFilter(), CN->getBias(),
767-
RS->getType(), CN->getDepth(), CN->getKernel(), CN->getStride(),
768-
CN->getPad(), CN->getGroup());
765+
auto *newCN =
766+
F->createConv(CN->getName(), CN->getInput(), CN->getFilter(),
767+
CN->getBias(), RS->getType(), CN->getKernel(),
768+
CN->getStride(), CN->getPad(), CN->getGroup());
769769
RS->getResult().replaceAllUsesOfWith(newCN);
770770
continue;
771771
}

lib/Optimizer/Lower.cpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -504,9 +504,8 @@ void lowerGroupConvolutionNode(Function *F, ConvolutionNode &BNG) {
504504
{(groupId + 1) * outCperG, kernel, kernel, inCperG});
505505
auto *bias_slice = F->createSlice(BNG.getName(), bias, {groupId * outCperG},
506506
{(groupId + 1) * outCperG});
507-
convs[groupId] =
508-
F->createConv(BNG.getName(), in_slice, filter_slice, bias_slice, outTy,
509-
outCperG, kernel, stride, pad, 1);
507+
convs[groupId] = F->createConv(BNG.getName(), in_slice, filter_slice,
508+
bias_slice, outTy, kernel, stride, pad, 1);
510509
}
511510
auto result = F->createConcat(BNG.getName(), convs, 3);
512511
BNG.getResult().replaceAllUsesOfWith(result);

lib/Quantization/Quantization.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -322,10 +322,10 @@ void generateQuantizedGraph(
322322
auto QT = F->getParent()->uniqueType(ElemKind::Int8QTy,
323323
CV->getResult()->dims(),
324324
TQP.scale_, TQP.offset_);
325-
quantizedNode = F->createConv(
326-
CV->getName(), quantizedInputs[0], quantizedInputs[1],
327-
quantizedInputs[2], QT, CV->getDepth(), CV->getKernel(),
328-
CV->getStride(), CV->getPad(), CV->getGroup());
325+
quantizedNode =
326+
F->createConv(CV->getName(), quantizedInputs[0], quantizedInputs[1],
327+
quantizedInputs[2], QT, CV->getKernel(),
328+
CV->getStride(), CV->getPad(), CV->getGroup());
329329
break;
330330
}
331331
case Kinded::Kind::SliceNodeKind: {

0 commit comments

Comments (0)