8 using namespace InferenceEngine;
// Base-class constructor for a detection-network wrapper: stores the topology
// name, model path, and batching/async/logging configuration in members.
// NOTE(review): this view is truncated — the leading parameters (presumably
// topoName and pathToModel, given the initializer list) and the constructor
// body are not visible here; confirm against the full file.
13 base_detection::base_detection(
16 int maxBatch,
bool isBatchDynamic,
bool isAsync,
17 bool doRawOutputMessages
19 : topoName( topoName ), pathToModel( pathToModel )
20 , maxBatch( maxBatch ), isBatchDynamic( isBatchDynamic ), isAsync( isAsync )
// enablingChecked/_enabled start false — enablement appears to be decided
// lazily elsewhere, after construction.
21 , enablingChecked( false ), _enabled( false ), doRawOutputMessages( doRawOutputMessages )
// Block until the in-flight asynchronous inference request completes
// (RESULT_READY = wait indefinitely for the result to become available).
41 _request->Wait( IInferRequest::WaitMode::RESULT_READY );
// Per-device plugin configuration map handed to the plugin when the network
// is loaded.
63 std::map<std::string, std::string>
config = { };
// This code only enables dynamic batching when the target device name
// contains "CPU" or "GPU" — presumably the only plugins that support it;
// verify against the InferenceEngine version in use.
66 bool isPossibleDynBatch =
67 deviceName.find(
"CPU" ) != std::string::npos ||
68 deviceName.find(
"GPU" ) != std::string::npos;
69 if( isPossibleDynBatch )
// KEY_DYN_BATCH_ENABLED=YES lets individual infer requests run with a
// batch size smaller than the network's compiled maximum.
70 config[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES;