struct base_detection {
    InferenceEngine::ExecutableNetwork net;          // network compiled for the target device
    InferenceEngine::InferRequest::Ptr _request;     // inference request created from net
    const bool doRawOutputMessages;                  // print raw inference output when true

    base_detection(std::string topoName,
                   const std::string &pathToModel,
                   int maxBatch,
                   bool isBatchDynamic,
                   bool isAsync,
                   bool doRawOutputMessages);
    virtual ~base_detection() = default;

    // Each concrete detector reads its own model into a CNNNetwork.
    virtual InferenceEngine::CNNNetwork read_network() = 0;

    // Load the model onto the given device through the supplied Core.
    void load_into(InferenceEngine::Core &ie, const std::string &deviceName);

    virtual void submit_request();

    // Convenience access to the compiled network.
    InferenceEngine::ExecutableNetwork *operator->();
};
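To make the interface concrete, here is a minimal sketch of how a derived detector might plug into base_detection. The class name face_detection, the locally stored model path, the local Core created inside read_network(), and the IR file name in the usage snippet are illustrative assumptions; only the base_detection members listed above come from the source.

#include <string>
#include <inference_engine.hpp>

// Hypothetical concrete detector built on base_detection.
struct face_detection : base_detection {
    std::string modelPath;  // kept locally; the member list above does not show pathToModel being stored

    explicit face_detection(const std::string &pathToModel)
        : base_detection("Face Detection", pathToModel,
                         /*maxBatch*/ 1, /*isBatchDynamic*/ false,
                         /*isAsync*/ true, /*doRawOutputMessages*/ false),
          modelPath(pathToModel) {}

    // Read the IR (.xml/.bin pair) into a CNNNetwork.
    InferenceEngine::CNNNetwork read_network() override {
        InferenceEngine::Core ie;
        return ie.ReadNetwork(modelPath);
    }
};

// Usage sketch; the device name and model path are placeholders.
int main() {
    InferenceEngine::Core ie;                       // one Core shared by all detectors
    face_detection detector("face-detection.xml");  // hypothetical IR path
    detector.load_into(ie, "CPU");                  // compile for the chosen device
    // ...fill the input blobs of detector._request, then:
    detector.submit_request();
    return 0;
}

The split between the pure-virtual read_network() and the non-virtual load_into() suggests a template-method arrangement: load_into() presumably asks the derived class for a CNNNetwork, compiles it with the supplied Core, and stores the results in net and _request.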