#ifndef CNAMASDK_H
#define CNAMASDK_H

/* Export/visibility control: dllexport when building the Windows shared
 * library, default ELF symbol visibility on other platforms. */
#ifdef _WIN32
#ifdef NAMA_BUILD_SHARED_LIB
#define FUNAMA_API __declspec(dllexport)
#else
#define FUNAMA_API
#endif
#else
#define FUNAMA_API __attribute__((visibility("default")))
#endif

/* I/O formats accepted by the render interfaces.  Some of these values are
 * re-defined (identically) with fuller documentation later in this header. */
#define FU_FORMAT_BGRA_BUFFER 0
#define FU_FORMAT_RGBA_TEXTURE 1
#define FU_FORMAT_NV21_BUFFER 2
#define FU_FORMAT_GL_CURRENT_FRAMEBUFFER 3
#define FU_FORMAT_RGBA_BUFFER 4
#define FU_FORMAT_CAMERA 5
#define FU_FORMAT_RGBA_TEXTURE_EXTERNAL_OES 6
#define FU_FORMAT_ANDROID_DUAL 7
#define FU_FORMAT_NV12_BUFFER 8
#define FU_FORMAT_INTERNAL_IOS_DUAL_INPUT 9
#define FU_FORMAT_GL_SPECIFIED_FRAMEBUFFER 10
#define FU_FORMAT_AVATAR_INFO 12
#define FU_FORMAT_I420_BUFFER 13
#define FU_FORMAT_VOID 14

/* Filter stage bit flags. */
#define NAMA_FILTER_TYPE_PRE 1
#define NAMA_FILTER_TYPE_ONCE 2
#define NAMA_FILTER_TYPE_FXAA 4
#define NAMA_FILTER_TYPE_NORMAL 8

/* Pixel formats accepted by the detector interfaces. */
#define DETECTOR_IMAGE_FORMAT_RGBA 0
#define DETECTOR_IMAGE_FORMAT_MONO 1
#define DETECTOR_IMAGE_FORMAT_BGRA 2
#define DETECTOR_IMAGE_FORMAT_NV21 3
#define DETECTOR_IMAGE_FORMAT_NV12 4
#define DETECTOR_IMAGE_FORMAT_I420 5

/* Bit flags selecting AI capabilities (models).  Flags for bit 20 and above
 * are provided as plain macros below this enum. */
typedef enum FUAITYPE {
  FUAITYPE_NONE = 0,
  FUAITYPE_BACKGROUNDSEGMENTATION = 1 << 1,
  FUAITYPE_HAIRSEGMENTATION = 1 << 2,
  FUAITYPE_HANDGESTURE = 1 << 3,
  FUAITYPE_HANDPROCESSOR = 1 << 4,
  FUAITYPE_TONGUETRACKING = 1 << 5,
  FUAITYPE_HUMANPOSE2D = 1 << 6,
  FUAITYPE_BACKGROUNDSEGMENTATION_GREEN = 1 << 7,
  FUAITYPE_FACEPROCESSOR = 1 << 8,
  FUAITYPE_HUMAN_PROCESSOR = 1 << 9,
  FUAITYPE_FACE_RECOGNIZER = 1 << 10,
  FUAITYPE_IMAGE_BEAUTY = 1 << 11,
  FUAITYPE_FACE_ATTRIBUTE_PROCESSOR = 1 << 12,
  FUAITYPE_FACELANDMARKS75 = 1 << 13,
  FUAITYPE_FACELANDMARKS209 = 1 << 14,
  FUAITYPE_FACELANDMARKS239 = 1 << 15
} FUAITYPE;

/* Additional FUAITYPE bit flags (bits 20..24), kept as macros because their
 * values continue the bit sequence above. */
#define FUAITYPE_FACEPROCESSOR_FACECAPTURE 1048576                 // 1 << 20
#define FUAITYPE_FACEPROCESSOR_FACECAPTURE_TONGUETRACKING 2097152  // 1 << 21
#define FUAITYPE_FACEPROCESSOR_HAIRSEGMENTATION 4194304            // 1 << 22
#define FUAITYPE_FACEPROCESSOR_HEADSEGMENTATION 8388608            // 1 << 23
#define FUAITYPE_FACEPROCESSOR_EXPRESSION_RECOGNIZER 16777216      // 1 << 24
/* Additional FUAITYPE bit flags, continued (bits 25..38).  These exceed the
 * 32-bit range used by the FUAITYPE enum, so they remain macros. */
#define FUAITYPE_FACEPROCESSOR_EMOTION_RECOGNIZER 33554432   // 1 << 25
#define FUAITYPE_FACEPROCESSOR_DISNEYGAN 67108864            // 1 << 26
#define FUAITYPE_FACEPROCESSOR_FACEID 134217728              // 1 << 27
#define FUAITYPE_HUMAN_PROCESSOR_DETECT 268435456            // 1 << 28
#define FUAITYPE_HUMAN_PROCESSOR_SEGMENTATION 536870912      // 1 << 29
#define FUAITYPE_HUMAN_PROCESSOR_2D_SELFIE 1073741824        // 1 << 30
#define FUAITYPE_HUMAN_PROCESSOR_2D_DANCE 2147483648         // 1 << 31
#define FUAITYPE_HUMAN_PROCESSOR_2D_SLIM 4294967296          // 1 << 32
#define FUAITYPE_HUMAN_PROCESSOR_3D_SELFIE 8589934592        // 1 << 33
#define FUAITYPE_HUMAN_PROCESSOR_3D_DANCE 17179869184        // 1 << 34
#define FUAITYPE_HUMAN_PROCESSOR_2D_IMGSLIM 34359738368      // 1 << 35
#define FUAITYPE_IMAGE_BEAUTY_UNKNOW 68719476736             // 1 << 36
#define FUAITYPE_FACEPROCESSOR_LIPSOCCUSEGMENT 137438953472  // 1 << 37
#define FUAITYPE_FACEPROCESSOR_FACEOCCUSEGMENT 274877906944  // 1 << 38

/* Hand gesture classification results. */
typedef enum FUAIGESTURETYPE {
  FUAIGESTURE_NO_HAND = -1,
  FUAIGESTURE_UNKNOWN = 0,
  FUAIGESTURE_THUMB = 1,
  FUAIGESTURE_KORHEART = 2,
  FUAIGESTURE_SIX = 3,
  FUAIGESTURE_FIST = 4,
  FUAIGESTURE_PALM = 5,
  FUAIGESTURE_ONE = 6,
  FUAIGESTURE_TWO = 7,
  FUAIGESTURE_OK = 8,
  FUAIGESTURE_ROCK = 9,
  FUAIGESTURE_CROSS = 10,
  FUAIGESTURE_HOLD = 11,
  FUAIGESTURE_GREET = 12,
  FUAIGESTURE_PHOTO = 13,
  FUAIGESTURE_HEART = 14,
  FUAIGESTURE_MERGE = 15,
  FUAIGESTURE_EIGHT = 16,
  FUAIGESTURE_HALFFIST = 17,
  FUAIGESTURE_GUN = 18,
} FUAIGESTURETYPE;

/* Log verbosity, ordered from most verbose (TRACE) to silent (OFF). */
typedef enum FULOGLEVEL {
  FU_LOG_LEVEL_TRACE = 0,
  FU_LOG_LEVEL_DEBUG = 1,
  FU_LOG_LEVEL_INFO = 2,
  FU_LOG_LEVEL_WARN = 3,
  FU_LOG_LEVEL_ERROR = 4,
  FU_LOG_LEVEL_CRITICAL = 5,
  FU_LOG_LEVEL_OFF = 6
} FULOGLEVEL;

/* Facial expression bit flags reported by the expression recognizer. */
typedef enum FUAIEXPRESSIONTYPE {
  FUAIEXPRESSION_UNKNOWN = 0,
  FUAIEXPRESSION_BROW_UP = 1 << 1,
  FUAIEXPRESSION_BROW_FROWN = 1 << 2,
  FUAIEXPRESSION_LEFT_EYE_CLOSE = 1 << 3,
  FUAIEXPRESSION_RIGHT_EYE_CLOSE = 1 << 4,
  FUAIEXPRESSION_EYE_WIDE = 1 << 5,
  FUAIEXPRESSION_MOUTH_SMILE_LEFT = 1 << 6,
  FUAIEXPRESSION_MOUTH_SMILE_RIGHT = 1 << 7,
  FUAIEXPRESSION_MOUTH_FUNNEL = 1 << 8,
  FUAIEXPRESSION_MOUTH_OPEN = 1 << 9,
  FUAIEXPRESSION_MOUTH_PUCKER = 1 << 10,
  FUAIEXPRESSION_MOUTH_ROLL = 1 << 11,
  FUAIEXPRESSION_MOUTH_PUFF = 1 << 12,
  FUAIEXPRESSION_MOUTH_SMILE = 1 << 13,
  FUAIEXPRESSION_MOUTH_FROWN = 1 << 14,
  FUAIEXPRESSION_HEAD_LEFT = 1 << 15,
  FUAIEXPRESSION_HEAD_RIGHT = 1 << 16,
  FUAIEXPRESSION_HEAD_NOD = 1 << 17,
} FUAIEXPRESSIONTYPE;

/* Tongue direction bit flags reported by the tongue tracker. */
typedef enum FUAITONGUETYPE {
  FUAITONGUE_UNKNOWN = 0,
  FUAITONGUE_UP = 1 << 1,
  FUAITONGUE_DOWN = 1 << 2,
  FUAITONGUE_LEFT = 1 << 3,
  FUAITONGUE_RIGHT = 1 << 4,
  FUAITONGUE_LEFT_UP = 1 << 5,
  FUAITONGUE_LEFT_DOWN = 1 << 6,
  FUAITONGUE_RIGHT_UP = 1 << 7,
  FUAITONGUE_RIGHT_DOWN = 1 << 8,
} FUAITONGUETYPE;

/* Item trigger events delivered through ItemCallBack. */
typedef enum FUITEMTRIGGERTYPE {
  FUITEMTRIGGER_UNKNOWN = 0,
  FUITEMTRIGGER_CAI_DAN = 1,
} FUITEMTRIGGERTYPE;

/* Emotion recognition bit flags. */
typedef enum FUAIEMOTIONTYPE {
  FUAIEMOTION_UNKNOWN = 0,
  FUAIEMOTION_HAPPY = 1 << 1,
  FUAIEMOTION_SAD = 1 << 2,
  FUAIEMOTION_ANGRY = 1 << 3,
  FUAIEMOTION_SURPRISE = 1 << 4,
  FUAIEMOTION_FEAR = 1 << 5,
  FUAIEMOTION_DISGUST = 1 << 6,
  FUAIEMOTION_NEUTRAL = 1 << 7,
  FUAIEMOTION_CONFUSE = 1 << 8,
} FUAIEMOTIONTYPE;

/* How much of a human body is visible in the frame. */
typedef enum FUAIHUMANSTATE {
  FUAIHUMAN_NO_BODY = 0,
  FUAIHUMAN_HALF_LESS_BODY = 1,
  FUAIHUMAN_HALF_BODY = 2,
  FUAIHUMAN_HALF_MORE_BODY = 3,
  FUAIHUMAN_FULL_BODY = 4,
} FUAIHUMANSTATE;

/* Scene classification used by the human processor. */
typedef enum FUAISCENESTATE {
  FUAISCENE_UNKNOWN = -1,
  FUAISCENE_SELFIE = 0,
  FUAISCENE_DANCE = 1,
  FUAISCENE_SLIM = 2,
} FUAISCENESTATE;

/* Avatar follow modes. */
typedef enum FUAIHUMANFOLLOWMODE {
  FUAIHUMAN_FOLLOW_MODE_UNKNOWN = -1,
  FUAIHUMAN_FOLLOW_MODE_FIX = 0,
  FUAIHUMAN_FOLLOW_MODE_ALIGN = 1,
  FUAIHUMAN_FOLLOW_MODE_STAGE = 2
} FUAIHUMANFOLLOWMODE;

/* Mirror type: left-right, top-bottom, both axes, or none. */
typedef enum FUAIHUMANMIRRORTYPE {
  FUAIHUMAN_MIRROR_LR = 0,
  FUAIHUMAN_MIRROR_TB = 1,
  FUAIHUMAN_MIRROR_BOTH = 2,
  FUAIHUMAN_MIRROR_NONE = 3
} FUAIHUMANMIRRORTYPE;

/* In-plane rotation in multiples of 90 degrees. */
typedef enum FUAIHUMANROTTYPE {
  FUAIHUMAN_ROT_0 = 0,
  FUAIHUMAN_ROT_90 = 1,
  FUAIHUMAN_ROT_180 = 2,
  FUAIHUMAN_ROT_270 = 3
} FUAIHUMANROTTYPE;

/* Human segmentation scene presets. */
typedef enum FUAIHUMANSEGSCENETYPE {
  FUAIHUMAN_SEG_MEETING = 0,
  FUAIHUMAN_SEG_COMMON = 1
} FUAIHUMANSEGSCENETYPE;

/* 8 base orientation cases: first a counter-clockwise rotation (in degrees),
 * then an optional flip. */
typedef enum TRANSFORM_MATRIX {
  DEFAULT = 0,             // no rotation, no flip
  CCROT0 = DEFAULT,        // no rotation, no flip
  CCROT90,                 // counter-clockwise rotate 90 degrees
  CCROT180,                // counter-clockwise rotate 180 degrees
  CCROT270,                // counter-clockwise rotate 270 degrees
  CCROT0_FLIPVERTICAL,     // vertical flip
  CCROT0_FLIPHORIZONTAL,   // horizontal flip
  CCROT90_FLIPVERTICAL,    // rotate 90 degrees CCW, then vertical flip
  CCROT90_FLIPHORIZONTAL,  // rotate 90 degrees CCW, then horizontal flip
  /* The enumerators below are aliases of the enumerators above; there are
   * only 8 distinct orientation cases. */
  CCROT0_FLIPVERTICAL_FLIPHORIZONTAL = CCROT180,
  CCROT90_FLIPVERTICAL_FLIPHORIZONTAL = CCROT270,
  CCROT180_FLIPVERTICAL = CCROT0_FLIPHORIZONTAL,
  CCROT180_FLIPHORIZONTAL = CCROT0_FLIPVERTICAL,
  CCROT180_FLIPVERTICAL_FLIPHORIZONTAL = DEFAULT,
  CCROT270_FLIPVERTICAL = CCROT90_FLIPHORIZONTAL,
  CCROT270_FLIPHORIZONTAL = CCROT90_FLIPVERTICAL,
  CCROT270_FLIPVERTICAL_FLIPHORIZONTAL = CCROT90,
} TRANSFORM_MATRIX;

/* Rotation modes passed to the rotation-mode interfaces. */
#define FU_ROTATION_MODE_0 0
#define FU_ROTATION_MODE_90 1
#define FU_ROTATION_MODE_180 2
#define FU_ROTATION_MODE_270 3

/*\brief An I/O format where `ptr` points to a BGRA buffer. It matches the
 * camera format on iOS. */
#define FU_FORMAT_BGRA_BUFFER 0
/*\brief An I/O format where `ptr` points to a single GLuint that is a RGBA
 * texture. It matches the hardware encoding format on Android. */
#define FU_FORMAT_RGBA_TEXTURE 1
/*\brief An I/O format where `ptr` points to an NV21 buffer. It matches the
 * camera preview format on Android. */
#define FU_FORMAT_NV21_BUFFER 2
/*\brief An I/O format where `ptr` points to an I420 buffer. */
#define FU_FORMAT_I420_BUFFER 13
/*\brief An output-only format where `out_ptr` is NULL or points to a
 * TGLRenderingDesc structure. The result is rendered onto the current GL
 * framebuffer no matter what `out_ptr` is. If a TGLRenderingDesc is
 * specified, we can optionally return an image to the caller in the
 * specified format.  (FU_FORMAT_GL_CURRENT_FRAMEBUFFER, value 3.) */
/* NOTE(review): the region below appears corrupted/truncated — a
 * "typedef struct { int camera_id;" fragment (presumably TCameraDesc) is cut
 * off mid-definition, an orphaned comment terminator follows, and the
 * `extern "C" {` opener matched by the closing brace at the end of this
 * header is also missing here.  Restore the missing text from the upstream
 * FaceUnity CNamaSDK.h before expecting this header to compile — TODO confirm.
 * The line is kept byte-identical below.
*/ #define FU_FORMAT_GL_CURRENT_FRAMEBUFFER 3 /*\brief An I/O format where `ptr` points to a RGBA buffer. */ #define FU_FORMAT_RGBA_BUFFER 4 /*\brief An input-only format where `ptr` points to a TCameraDesc struct. The * input is directly taken from the specified camera. w and h are taken as the * preferred image size*/ #define FU_FORMAT_CAMERA 5 /*\brief An input-only format where `in_ptr` points to a single GLuint that is * an EXTERNAL_OES texture. It matches the hardware encoding format on Android. */ #define FU_FORMAT_RGBA_TEXTURE_EXTERNAL_OES 6 /*\brief An I/O format where `in_ptr` points to a TAndroidDualMode struct, which provides both a texture and an NV21 buffer as input. As the name suggests, this is the most efficient interface on Android. */ #define FU_FORMAT_ANDROID_DUAL_MODE 7 typedef struct { int camera_id; // 0 means turning on, enable <= 0 means turning off */ FUNAMA_API int fuSetTongueTracking(int i); /** \brief Turn on or turn off multisample anti-alising, msaa has an impact on performance. \param samples > 0 means turning on, samples <= 0 means turning off, 0 by default. samples<=GL_MAX_SAMPLES(usually 4). */ FUNAMA_API int fuSetMultiSamples(int samples); /** \brief Load Tongue Detector data, to support tongue animation. \param data - the pointer to tongue model data 'tongue.bundle', which is along beside lib files in SDK package \param sz - the data size, we use plain int to avoid cross-language compilation issues \return zero for failure, one for success */ FUNAMA_API int fuLoadTongueModel(void* data, int sz); /** \warning deprecated api. */ FUNAMA_API void fuSetStrictTracking(int i); /** \brief Get the current rotationMode. \return the current rotationMode, one of 0..3 should work. 
*/ FUNAMA_API int fuGetCurrentRotationMode(); /** \brief Get certificate permission code for modules \param i - get i-th code, currently available for 0 and 1 \return The permission code */ FUNAMA_API int fuGetModuleCode(int i); /** \brief the same as fuIsTracking */ FUNAMA_API int fuHasFace(); /** \warning deprecated api. */ FUNAMA_API int fuSetASYNCTrackFace(int i); /** \brief Clear Physics World \return 0 means physics disabled and no need to clear,1 means cleared successfully */ FUNAMA_API int fuClearPhysics(); /** \warning depreated api */ FUNAMA_API int fuSetFaceDetParam(void* name, void* pinput); /** \warning depreated api */ FUNAMA_API int fuSetFaceTrackParam(void* name, void* pinput); /** \brief Get SDK version string, Major.Minor.Fix_ID \return SDK version string in const char* */ FUNAMA_API const char* fuGetVersion(); /** \brief Get SDK commit time string \return SDK commit time string in const char* */ FUNAMA_API const char* fuGetCommitTime(); /** \brief Load AI model data, to support tongue animation. \param data - the pointer to AI model data 'ai_xxx.bundle',which is along beside lib files in SDK package \param sz - the data size, we use plain int to avoid cross-language compilation issues \param type - define in FUAITYPE enumeration. \return zero for failure, one for success. */ FUNAMA_API int fuLoadAIModelFromPackage(void* data, int sz, FUAITYPE type); /** \brief Preprocess AI model. \param data - the pointer to AI model data 'ai_xxx.bundle',which is along beside lib files in SDK package \param sz - the data size, we use plain int to avoid cross-language compilation issues \param type - define in FUAITYPE enumeration. \return zero for failure, one for success. */ FUNAMA_API int fuPreprocessAIModelFromPackage(void* data, int sz, FUAITYPE type); /** \brief Release AI Model, when no more need some type of AI albility. \param type - define in FUAITYPE enumeration. \return zero for failure, one for success. 
*/ FUNAMA_API int fuReleaseAIModel(FUAITYPE type); /** \brief Get AI Model load status \param type - define in FUAITYPE enumeration. \return zero for unloaded, one for loaded. */ FUNAMA_API int fuIsAIModelLoaded(FUAITYPE type); /** \brief Render a list of items on top of a GLES texture or a memory buffer. This function needs a GLES 2.0+ context. \param texid specifies a GLES texture. Set it to 0u if you want to render to a memory buffer. \param img specifies a memory buffer. Set it to NULL if you want to render to a texture. If img is non-NULL, it will be overwritten by the rendered image when fuRenderItems returns \param w specifies the image width \param h specifies the image height \param frameid specifies the current frame id. To get animated effects, please increase frame_id by 1 whenever you call this. \param p_items points to the list of items \param n_items is the number of items \return a new GLES texture containing the rendered image in the texture mode */ FUNAMA_API int fuRenderItems(int texid, int* img, int w, int h, int frame_id, int* p_items, int n_items); /** \brief set crop state. \param state is the Cropped switch \return zero for closed, one for open */ FUNAMA_API int fuSetCropState(int state); /** \brief Set the coordinates of the crop. \param (x0,y0) is the coordinates of the starting point after cropping. (x0,y0) is (0,0) befor cropping \param (x1,y1) is the coordinate of the end point after cropping. (x1,y1) is (imageWideth, imageHeight) before cropping \return zero for failure, one for success */ FUNAMA_API int fuSetCropFreePixel(int x0, int y0, int x1, int y1); /** \brief Count API calls. \param name is the API name */ FUNAMA_API int fuAuthCountWithAPIName(char* name); FUNAMA_API void fuHexagonInitWithPath(const char* lib_directory_path); FUNAMA_API void fuHexagonTearDown(); /** \brief set if use pixel buffer to speed up reading pixel from buffer. \param use,set use or not use. 
*/ FUNAMA_API int fuSetUsePbo(bool use); /** \brief set Set texture loading quality \param quality, 0:high 1:medium 2.low */ FUNAMA_API int fuSetLoadQuality(int quality); /** \brief set if use the output texture for async reading, when use spcified framebuffer for output. \param use,set 1 for use or 0 for not use, not use by default for performance. */ FUNAMA_API int fuSetUseTexAsync(bool use); /** \brief set if force use gl 2. \param use,set 1 for use or 0 for not use. */ FUNAMA_API int fuSetForceUseGL2(int use); /** * \brief HandGestureCallBack,callback with first handgesture type. * \param type, ref to FUAIGESTURETYPE */ typedef void (*HandGestureCallBack)(int type); /** * \brief set callback for handgesture detection. * \param onHandGestureListener, * callback. will override the older one, null for reset callback. * \note this callback will be called with the first hand's type, you should * use fuHandDetectorGetResultNumHands and fuHandDetectorGetResultGestureType * for all info. * \note this callback will be called when calling Render* interface. when use * fuTrackFace*, you should use fuHandDetectorGetResultNumHands and * fuHandDetectorGetResultGestureType for all info. */ FUNAMA_API void fuSetHandGestureCallBack(HandGestureCallBack cb); /** * \brief ItemCallBack,callback. * \param handle, ref to handle for triggered item. * \param type, ref to FUITEMTRIGGERTYPE. */ typedef void (*ItemCallBack)(int handle, int type); /** * \brief set callback for item. * \param item, * handle for time * \param cb, * callback. will override the older one, null for reset callback. * \note this callback will be called when calling Render* interface. */ FUNAMA_API int fuSetItemCallBack(int handle, ItemCallBack cb); /** * \brief prepare GL resource for a list of items in advance * This function needs a GLES 2.0+ context. 
* \param p_items, points to the list of items * \param n_items, is the number of items */ FUNAMA_API void fuPrepareGLResource(int* p_items, int n_items); /** \brief check prepare gl resource is ready. This function needs a GLES 2.0+ context. \param output, 1 for ready prepared, 0 false, -1 load program binary failed */ FUNAMA_API int fuIsGLPrepared(int* p_items, int n_items); FUNAMA_API int fuGetFaceTransferTexID(); /** * \brief set use async ai inference. * \param use_async, * ture or false. */ FUNAMA_API int fuSetUseAsyncAIInference(int use_async); /** * \brief set use multi buffer. * \param use_multi_gpu_textuer, * ture or false. * \param use_multi_cpu_buffer, * ture or false. */ FUNAMA_API int fuSetUseMultiBuffer(int use_multi_gpu_textuer, int use_multi_cpu_buffer); /** \brief check gl error \return OpenGL error information, 0 for no error */ FUNAMA_API int fuCheckGLError(); /** FaceProcessor related api */ /** \brief Set tracking fov for ai model FaceProcessor. */ FUNAMA_API int fuSetFaceProcessorFov(float fov); /** \brief Get tracking fov of ai model FaceProcessor. */ FUNAMA_API float fuGetFaceProcessorFov(); /** \brief set faceprocessor's face detect mode. when use 1 for video mode, face detect strategy is opimized for no face scenario. In image process scenario, you should set detect mode into 0 image mode. \param mode, 0 for image, 1 for video, 1 by default */ FUNAMA_API int fuSetFaceProcessorDetectMode(int mode); /** \brief set ai model FaceProcessor's minium track face size. \param ratio, ratio with min(w,h). */ FUNAMA_API void fuFaceProcessorSetMinFaceRatio(float ratio); /** \brief set ai model FaceProcessor's landmark quality. \param quality, landmark quality, 0 for low quality, 1 for mediem, 2 for high quality. 1 by default. */ FUNAMA_API void fuFaceProcessorSetFaceLandmarkQuality(int quality); /** \brief set ai model FaceProcessor's detector mode. 
\param use, 0 for disable detect small face, 1 for enable detect small face */ FUNAMA_API void fuFaceProcessorSetDetectSmallFace(int use); /** \brief set ai model FaceProcessor use capture eye look camera. \param use, 0 for disable, 1 for enable \return zero for failed, one for success */ FUNAMA_API int fuFaceProcessorSetUseCaptureEyeLookCam(int use); /** \brief get ai model FaceProcessor's tracking hair mask with index. \param index, index of fuFaceProcessorGetNumResults. \param mask_width, width of return. \param mask_height, height of return. \return mask data. */ FUNAMA_API const float* fuFaceProcessorGetResultHairMask(int index, int* mask_width, int* mask_height); /** \brief get ai model FaceProcessor's tracking head mask with index. \param index, index of fuFaceProcessorGetNumResults. \param mask_width, width of return. \param mask_height, height of return. \return mask data. */ FUNAMA_API const float* fuFaceProcessorGetResultHeadMask(int index, int* mask_width, int* mask_height); /** \brief get ai model FaceProcessor's tracking face occlusion. \param index, index of fuFaceProcessorGetNumResults. \return zero for no occlusion, one for occlusion, minus one for no tracked face */ FUNAMA_API int fuFaceProcessorGetResultFaceOcclusion(int index); /** \brief get ai model FaceProcessor's face detection confidence score. \param index, index of fuFaceProcessorGetNumResults. \return face detection confidence score. */ FUNAMA_API float fuFaceProcessorGetConfidenceScore(int index); /** \brief get ai model FaceProcessor's tracking face count. \return num of faces. */ FUNAMA_API int fuFaceProcessorGetNumResults(); /** HumanProcessor related api */ /** \brief set humanprocessor's human detect mode. when use 1 for video mode, human detect strategy is opimized for no human scenario. In image process scenario, you should set detect mode into 0 image mode. 
\param mode, 0 for image, 1 for video, 1 by default */ FUNAMA_API int fuSetHumanProcessorDetectMode(int mode); /** \brief Reset ai model HumanProcessor's tracking state. */ FUNAMA_API void fuHumanProcessorReset(); /** \brief set ai model HumanProcessor's maxinum tracking people. */ FUNAMA_API void fuHumanProcessorSetMaxHumans(int max_humans); /** \param n_buffer_frames(default 5 and > 0): filter buffer frames. \param pos_w(default 0.05 and >= 0): root position filter weight, less pos_w -> smoother. \param angle_w(default 1.2 and >= 0): joint angle filter weight, less angle_w -> smoother. */ FUNAMA_API void fuHumanProcessorSetAvatarAnimFilterParams(int n_buffer_frames, float pos_w, float angle_w); /** \brief set ai model HumanProcessor's tracking fov, use to 3d joint projection. \param fov. */ FUNAMA_API void fuHumanProcessorSetFov(float fov); /** \brief get ai model HumanProcessor's tracking fov, use to 3d joint projection. \return fov */ FUNAMA_API float fuHumanProcessorGetFov(); /** \brief get ai model HumanProcessor's tracking result. \return tracked people number */ FUNAMA_API int fuHumanProcessorGetNumResults(); /** \brief get ai model HumanProcessor's tracking id. \param index, index of fuHumanProcessorGetNumResults \return tracking id */ FUNAMA_API int fuHumanProcessorGetResultTrackId(int index); /** \brief get ai model HumanProcessor's tracking human state with id. \param index is index of fuHumanProcessorGetNumResults \return state, enum of FUAIHUMANSTATE */ FUNAMA_API int fuHumanProcessorGetHumanState(int index); /** \brief get ai model HumanProcessor's tracking gesture types with id. \param index is index of fuHumanProcessorGetNumResults \param size, size of return data. \return gesture types array, [left hand gesture, right hand gesture], enum of FUAIGESTURETYPE */ FUNAMA_API const int* fuHumanProcessorGetGestureTypes(int index, int* size); /** \brief get ai model HumanProcessor's action type with index. 
\param index, index of fuHumanProcessorGetNumResults \return action type */ FUNAMA_API int fuHumanProcessorGetResultActionType(int index); /** \brief get ai model HumanProcessor's action score with index. \param index, index of fuHumanProcessorGetNumResults \return score */ FUNAMA_API float fuHumanProcessorGetResultActionScore(int index); /** \brief get ai model HumanProcessor's tracking rect with index. \param index, index of fuHumanProcessorGetNumResults \return rect array */ FUNAMA_API const float* fuHumanProcessorGetResultRect(int index); /** \brief get ai model HumanProcessor's tracking 2d joint with index. \param index, index of fuHumanProcessorGetNumResults \param size, size of return data. */ FUNAMA_API const float* fuHumanProcessorGetResultJoint2ds(int index, int* size); /** \brief get ai model HumanProcessor's tracking 3d joint with index. \param index, index of fuHumanProcessorGetNumResults \param size, size of return data. */ FUNAMA_API const float* fuHumanProcessorGetResultJoint3ds(int index, int* size); /** \brief get ai model HumanProcessor's pof2d joint with index.(The joint2ds generated by human driver). \param index, index of fuHumanProcessorGetNumResults \param size, size of return data. */ FUNAMA_API const float* fuHumanProcessorGetResultPofJoint2ds(int index, int* size); /** \brief get ai model HumanProcessor's pof2d joint scores with index. (The joint2ds generated by human driver). \param index, index of fuHumanProcessorGetNumResults \param size, size of return data. */ FUNAMA_API const float* fuHumanProcessorGetResultPofJointScores(int index, int* size); /** \brief enable model HumanProcessor's BVH motion frame output. \param enable: (default is true). */ FUNAMA_API void fuHumanProcessorSetEnableBVHOutput(bool enable); /** \brief set the inplane rotation of model HumanProcessor's BVH output. \param inplane_rot: inplane counter-clock-wise rotation type. 
*/ FUNAMA_API void fuHumanProcessorSetBVHInPlaneRotation( FUAIHUMANROTTYPE inplane_rot); /** \brief set the inplane mirror type of model HumanProcessor's BVH output. \param inplane_mirror_type: inplane mirror type. */ FUNAMA_API void fuHumanProcessorSetBVHInPlaneMirrorType( FUAIHUMANMIRRORTYPE inplane_mirror_type); /** \brief get model HumanProcessor's BVH motiont frame output. \param index: index of fuHumanProcessorGetNumResults \param size: size of return data. */ FUNAMA_API const float* fuHumanProcessorGetResultBVHMotionFrameOutput( int index, int* size); /** \brief get ai model HumanProcessor's tracking full body mask with index. \param index, index of fuHumanProcessorGetNumResults. \param mask_width, width of return. \param mask_height, height of return. \return mask data. */ FUNAMA_API const float* fuHumanProcessorGetResultHumanMask(int index, int* mask_width, int* mask_height); /** \brief calculate action distance. \return score of distance, range [0,1], 1 for fully match. */ FUNAMA_API float fuHumanActionMatchDistance(const float* src_pose, int sz_src, const float* ref_pose, int sz_ref); /** \brief calculate action distance with left and right. \return score of distance, range [0,1], 1 for fully match. */ FUNAMA_API float fuHumanActionMatchLeftRightHandDistance(const float* src_pose, int sz_src, const float* ref_pose, int sz_ref, bool mirror); /** \brief get hand detector's tracking results. \return num of hand tracked. */ FUNAMA_API int fuHandDetectorGetResultNumHands(); /** \brief get hand detector's tracking rect with index. \param index ,index of fuHandDetectorGetResultNumHands. \return rect data, float array with size 4. */ FUNAMA_API const float* fuHandDetectorGetResultHandRect(int index); /** \brief get hand detector's tracking hand gesture type with index. \param index ,index of fuHandDetectorGetResultNumHands. \return gesture type, ref to FUAIGESTURETYPE. 
*/ FUNAMA_API FUAIGESTURETYPE fuHandDetectorGetResultGestureType(int index); /** \brief get hand detector's tracking hand gesture score with index. \param index ,index of fuHandDetectorGetResultNumHands. \return gesture score, range [0,1] */ FUNAMA_API float fuHandDetectorGetResultHandScore(int index); FUNAMA_API void fuSetOutputImageSize(int w, int h); FUNAMA_API void fuSetCacheDirectory(const char* dir); /** \brief cache data manually. */ FUNAMA_API void fuRunCache(); /** \brief record current memory usage. this interface works on android for now. The device must root and run corrspond monitor process. \return zero for failure, one for success */ FUNAMA_API int fuRecordMemoryUsage(const char* tag); /** \brief when application pause calling fuRender, call fuSetRenderPauseState(true) to pause the internal physis update. \param pause ,pause state, if true SDK will pause physis update, and will be turn on in next fuRender call automatically. */ FUNAMA_API void fuSetRenderPauseState(bool pause); /** \brief internal api for profile */ FUNAMA_API int fuProfileGetNumTimers(); FUNAMA_API const char* fuProfileGetTimerName(int index); FUNAMA_API long long fuProfileGetTimerAverage(int index); FUNAMA_API long long fuProfileGetTimerCount(int index); FUNAMA_API long long fuProfileGetTimerMin(int index); FUNAMA_API long long fuProfileGetTimerMax(int index); FUNAMA_API int fuProfileResetAllTimers(); FUNAMA_API void fuSetForcePortraitMode(int mode); // in_format 0->RGB 1->RGBA FUNAMA_API void fuImageBeautyResetPic(void* in_ptr, int w, int h, int format, void* out_ptr, int out_w, int out_h); FUNAMA_API void fuImageBeautyNewPic(); FUNAMA_API void fuSetFaceDelayLeaveFrameNum(int frame_num); FUNAMA_API void fuSetFaceDelayLeaveEnable(bool use); FUNAMA_API void fuSetHumanSegScene(FUAIHUMANSEGSCENETYPE seg_scene); FUNAMA_API void fuSetHandDetectEveryNFramesWhenNoHand(int frame_num); #ifdef __cplusplus } #endif #endif // !CNAMASDK_H