// Collect all used ops' outputs and drop unused ops without changing op order; all Input ops are always inserted.
std::vector<const Op*> oplists;
{
    for (std::pair<Schedule::BackendCache, vector<Schedule::OpCacheInfo>>& pipeline : scheduleInfo.pipelineInfo) {
        for (auto& info : pipeline.second) {
            oplists.push_back(info.op);
        }
    }
}
// Set each tensor's input/output usage according to the collected op list.
setInputOutputForOps(allTensors, oplists, net->usage() == Usage_INFERENCE_STATIC);
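setInputOutputForOps itself is not shown in this excerpt. Conceptually it walks the collected op list and marks tensors that are consumed but never produced as INPUT, and tensors that are produced but never consumed as OUTPUT. The following is a minimal standalone sketch of that classification under that assumption; SimpleOp, TensorUsage, and markInputOutput are illustrative names, not MNN's actual types.

// Minimal sketch of the usage-marking idea, using simplified stand-in types.
// MNN's real setInputOutputForOps works on Op/Tensor data from the flatbuffer net.
#include <set>
#include <vector>

enum class TensorUsage { NORMAL, INPUT, OUTPUT };

struct SimpleOp {
    std::vector<int> inputs;   // indices of tensors this op reads
    std::vector<int> outputs;  // indices of tensors this op writes
};

// Tensors consumed but never produced become INPUT; tensors produced but never
// consumed become OUTPUT; everything else stays NORMAL.
static void markInputOutput(const std::vector<SimpleOp>& usedOps,
                            std::vector<TensorUsage>& usage) {
    std::set<int> produced, consumed;
    for (const auto& op : usedOps) {
        for (int i : op.inputs)  consumed.insert(i);
        for (int o : op.outputs) produced.insert(o);
    }
    for (int idx = 0; idx < (int)usage.size(); ++idx) {
        if (consumed.count(idx) && !produced.count(idx)) {
            usage[idx] = TensorUsage::INPUT;
        } else if (produced.count(idx) && !consumed.count(idx)) {
            usage[idx] = TensorUsage::OUTPUT;
        }
    }
}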
// Add output tensors according to the configs' saveTensors and the net's outputName.
std::unordered_map<std::string, int> tensorNameIndexMap;
for (int i = 0; i < net->tensorName()->size(); ++i) {
    tensorNameIndexMap[net->tensorName()->Get(i)->str()] = i;
}
bool userSetOutput = false;
// This handles the case where you want to inspect intermediate tensors (e.g. print them):
// their names are looked up in the tensor name map and they are treated as output tensors.
for (auto& config : configs) {
    userSetOutput = userSetOutput || (!config.saveTensors.empty());
    for (const auto& name : config.saveTensors) {
        auto iter = tensorNameIndexMap.find(name);
        if (iter != tensorNameIndexMap.end()) {
            auto t = allTensors[iter->second].get();
            if (TensorUtils::getDescribe(t)->usage == Tensor::InsideDescribe::NORMAL) {
                TensorUtils::getDescribe(t)->usage = Tensor::InsideDescribe::OUTPUT;
            }
            scheduleInfo.outputTensor.insert(
                std::make_pair(net->tensorName()->GetAsString(iter->second)->c_str(), t));
        } else {
            MNN_PRINT("Bad outputname: %s\n", name.c_str());
        }
    }
}
if (net->outputName()) {
    userSetOutput = userSetOutput || net->outputName()->size() >= 1;
    for (int i = 0; i < net->outputName()->size(); ++i) {
        std::string name = net->outputName()->Get(i)->str();
        auto iter = tensorNameIndexMap.find(name);
        if (iter != tensorNameIndexMap.end()) {
            auto t = allTensors[iter->second].get();
            if (TensorUtils::getDescribe(t)->usage == Tensor::InsideDescribe::NORMAL) {
                TensorUtils::getDescribe(t)->usage = Tensor::InsideDescribe::OUTPUT;
            }
            scheduleInfo.outputTensor.insert(
                std::make_pair(net->tensorName()->GetAsString(iter->second)->c_str(), t));
        }
    }
}
if (scheduleInfo.outputTensor.empty()) {
    userSetOutput = false;
}
// Record the model's input/output tensors in scheduleInfo.
for (int index = 0; index < allTensors.size(); index++) {
    auto t = allTensors[index].get();
    auto usage = TensorUtils::getDescribe(t)->usage; // usage is NORMAL, INPUT, or OUTPUT at this point
    if (usage == Tensor::InsideDescribe::INPUT) {
        scheduleInfo.inputTensors.insert(std::make_pair(net->tensorName()->GetAsString(index)->c_str(), t));
    }
    if (usage == Tensor::InsideDescribe::OUTPUT && (!userSetOutput)) {
        scheduleInfo.outputTensor.insert(
            std::make_pair(net->tensorName()->GetAsString(index)->c_str(), t));
    }
}
// Static inference: shape and geometry computation are skipped.
if (net->usage() == Usage_INFERENCE_STATIC) {
    for (auto& pipInfo : scheduleInfo.pipelineInfo) {
        pipInfo.first.needComputeGeometry = false;
        pipInfo.first.needComputeShape = false;
    }
}
// For pipelines that still need geometry computation, detect constant subgraphs
// via GeometryComputerUtils::buildConstantTensors.
#ifndef MNN_BUILD_MINI
for (auto iter = scheduleInfo.pipelineInfo.begin(); iter != scheduleInfo.pipelineInfo.end();) {
    if (!iter->first.needComputeGeometry) {
        // Static models don't need the const check
        iter++;
        continue;
    }
    auto breakIndex = GeometryComputerUtils::buildConstantTensors(iter->second);
    if (breakIndex >= 0) {
        scheduleInfo.needInputContentForShape = true;
    }
#ifdef MNN_SEPERTE_SIZE
    if (breakIndex >= 0 && (breakIndex + 1) < iter->second.size()) {
        // Split oplist
        std::vector<Schedule::OpCacheInfo> fuse;
        std::vector<Schedule::OpCacheInfo> separate;
        fuse.insert(fuse.begin(), iter->second.begin(), iter->second.begin() + breakIndex + 1);
        separate.insert(separate.begin(), iter->second.begin() + breakIndex + 1, iter->second.end());
        oplists.clear();
        iter->second = std::move(separate);
        iter = scheduleInfo.pipelineInfo.insert(iter, std::make_pair(iter->first, fuse));
        iter++;
        iter++;
    } else {
        iter++;
    }
#else
    iter++;
#endif
}
#endif
return true;
}
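From the user's side, the saveTensors branch above is what makes an intermediate tensor retrievable as a session output. A usage sketch against MNN's public Interpreter API follows; the model path "model.mnn" and tensor name "conv1_out" are placeholders for illustration.

// Usage sketch: expose an intermediate tensor as a session output via saveTensors.
#include <MNN/Interpreter.hpp>
#include <memory>

int main() {
    std::shared_ptr<MNN::Interpreter> net(MNN::Interpreter::createFromFile("model.mnn"));
    MNN::ScheduleConfig config;
    config.saveTensors = {"conv1_out"};        // mark an intermediate tensor as an extra output
    auto session = net->createSession(config); // Schedule records it in scheduleInfo.outputTensor
    net->runSession(session);
    // Because the tensor was marked OUTPUT, it can now be fetched by name.
    auto saved = net->getSessionOutput(session, "conv1_out");
    (void)saved;
    return 0;
}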
mBackend->onResizeBegin();
auto curBackend = iter.execution->backend();
if (mAllocInput) {
    for (auto t : iter.workInputs) {
        auto allocRes = _allocTensor(t, curBackend, mOutputStatic);
        if (!allocRes) {
            return OUT_OF_MEMORY;
        }
    }
}
{
    for (auto t : iter.workOutputs) {
        auto res = _allocTensor(t, curBackend, mOutputStatic);
        if (!res) {
            return OUT_OF_MEMORY;
        }
    }
}
mBackend->onResizeEnd();
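_allocTensor is not shown in this excerpt; presumably it skips tensors that already own memory and otherwise requests a buffer from the op's backend. The sketch below illustrates that idea on top of Backend::onAcquireBuffer; allocTensorSketch, its include paths, and its storage-type policy are assumptions for illustration, not MNN's actual implementation.

// Illustrative sketch only; the real _allocTensor lives in MNN's Pipeline code.
#include "core/Backend.hpp"
#include "core/TensorUtils.hpp"

using namespace MNN;

static bool allocTensorSketch(Tensor* t, Backend* backend, bool outputStatic) {
    if (nullptr != t->buffer().host) {
        return true; // already has memory (e.g. constant or user-fed input)
    }
    auto usage = TensorUtils::getDescribe(t)->usage;
    // Assumed policy: outputs of a static-output session get STATIC storage so they
    // survive resize; everything else comes from the backend's DYNAMIC pool.
    auto storageType = (outputStatic && usage == Tensor::InsideDescribe::OUTPUT)
                           ? Backend::STATIC
                           : Backend::DYNAMIC;
    return backend->onAcquireBuffer(t, storageType);
}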