C# — Video face recognition and comparison with a stored image using Emgu 3.2.0


Dear developers, this is my first time asking a question here.

Last week I got a task from my supervisor: develop software that compares visitors' faces appearing on camera with the photos on their member cards. After deep research it seems impossible to do this reliably from a single photo, but that's not the point here. I use C# and the Emgu library, and I wrote working code — the problem is accuracy. I tried all of "Emgu.CV.EigenFaceRecognizer", "Emgu.CV.FisherFaceRecognizer" and "Emgu.CV.LBPHFaceRecognizer", and all of them make a lot of misrecognitions. As a test I uploaded 10 photos of my (European) self and 10 more of my Chinese classmate — the program shows my name, and the "eigen face" label keeps flickering between both of our names when it sees him. So far I have fixed all errors and bugs myself, but right now there is no error message, and I don't know what to do next. Thank you for your attention.

This is the main class:

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Threading;
using System.Threading.Tasks;
using System.IO;
using System.Xml;
using System.Runtime.InteropServices;
using System.Security.Principal;
using Microsoft.Win32.SafeHandles;

using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.CvEnum;
using Emgu.Util;
using Emgu.CV.UI;

namespace ILoveWinForms
{
    /// <summary>
    /// Main form: grabs webcam frames, detects faces with a Haar cascade and labels
    /// them using the Classifier_Train recogniser.  Also hosts the training workflow
    /// (cropping faces from a still photo and appending name/file pairs to
    /// TrainedFaces\TrainedLabels.xml).
    /// NOTE(review): this source was recovered from a lower-cased scrape.  The
    /// designer-owned names kept verbatim below (message_bar, videopicturebox,
    /// faces_found_panel, nametextbox, photopicturebox, facepicturebox, and the
    /// wired handlers form1_load / deletebutton_click / addbutton_click) must match
    /// the .Designer.cs file — verify against it.
    /// </summary>
    public partial class Form1 : Form
    {
        public Form1()
        {
            InitializeComponent();

            // Report whether previously trained faces/labels were found on disk.
            if (eigen_recog.IsTrained)
            {
                message_bar.Text = "Training data loaded";
            }
            else
            {
                message_bar.Text = "No training data found, please train program using Train menu option";
            }

            face = new CascadeClassifier("haarcascade_frontalface_default.xml");
            enc_parameters.Param[0] = enc;
            image_encoder_jpg = GetEncoder(ImageFormat.Jpeg);
        }

        private void form1_load(object sender, EventArgs e)
        {
            try
            {
                captureCam = new Capture(); // default webcam
            }
            catch (NullReferenceException exception)
            {
                MessageBox.Show(exception.Message);
                return;
            }

            // Process one frame each time the message loop goes idle.
            Application.Idle += new EventHandler(BroadcastFunction);
            capturingProcess = true;
        }

        #region Detection

        #region Detection variables
        //****************************** detection global variables ******************************//

        Capture captureCam = null;               // webcam capture instance
        bool capturingProcess = false;           // true while frames are being processed
        Image<Bgr, byte> imgOrg;                 // current frame (BGR channel order in OpenCV)
        Image<Gray, byte> result, trainedFace = null; // last cropped face / trained face

        // Kept for compatibility with the original code; the instance field 'face'
        // declared in the training region below is the cascade actually used.
        private static readonly CascadeClassifier _cascadeClassifier =
            new CascadeClassifier("haarcascade_frontalface_default.xml");

        // Recogniser trained from the default TrainedFaces folder.
        Classifier_Train eigen_recog = new Classifier_Train();

        Bitmap[] extFaces;
        int faceNo = 0;

        //****************************************************************************************//
        #endregion

        /// <summary>Idle handler: grab a frame and run detection/recognition on it.</summary>
        void BroadcastFunction(object sender, EventArgs e)
        {
            imgOrg = captureCam.QueryFrame().ToImage<Bgr, byte>();
            FaceDetection(imgOrg);
        }

        /// <summary>
        /// Detects faces in the frame, crops and normalises each one to a 100x100
        /// histogram-equalised grayscale image, recognises it and draws the result.
        /// FIX(review): the original ran this loop with Parallel.For, writing to the
        /// shared 'result' field and touching WinForms controls from worker threads —
        /// its own comment admitted "parallel loop buggy" and swallowed every
        /// exception.  Faces per frame are few, so a plain sequential loop is both
        /// correct and fast, and the catch-all is gone.
        /// </summary>
        void FaceDetection(Image<Bgr, byte> imgOrg)
        {
            if (imgOrg == null) return;

            // Convert to grayscale for the detector.
            gray_frame = imgOrg.Convert<Gray, byte>();

            Rectangle[] facesDetected = face.DetectMultiScale(gray_frame, 1.2, 10, new Size(50, 50), Size.Empty);

            for (int i = 0; i < facesDetected.Length; i++)
            {
                // Shrink the Haar rectangle to cut background noise around the face.
                // (The width/height cross-use below mirrors the original sample.)
                facesDetected[i].X += (int)(facesDetected[i].Height * 0.15);
                facesDetected[i].Y += (int)(facesDetected[i].Width * 0.22);
                facesDetected[i].Height -= (int)(facesDetected[i].Height * 0.3);
                facesDetected[i].Width -= (int)(facesDetected[i].Width * 0.35);

                result = imgOrg.Copy(facesDetected[i]).Convert<Gray, byte>()
                               .Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
                result._EqualizeHist(); // normalise lighting before recognition

                // Outline the detected face in blue.
                imgOrg.Draw(facesDetected[i], new Bgr(Color.Blue), 2);

                if (eigen_recog.IsTrained)
                {
                    string name = eigen_recog.Recognise(result);
                    int match_value = (int)eigen_recog.Get_Eigen_Distance;

                    // FIX(review): draw the label next to the face it belongs to;
                    // the original drew every label at the fixed point (10, 80).
                    CvInvoke.PutText(imgOrg, name + " ",
                        new System.Drawing.Point(facesDetected[i].X - 2, facesDetected[i].Y - 2),
                        FontFace.HersheyComplex, 1.0, new Bgr(255, 0, 0).MCvScalar);
                    add_face_found(result, name, match_value);
                }
            }

            // Show the processed frame.
            videopicturebox.Image = imgOrg.ToBitmap();
        }

        // Layout state for the thumbnail panel (two thumbnails per row).
        int faces_count = 0;
        int faces_panel_y = 0;
        int faces_panel_x = 0;

        /// <summary>Removes all face thumbnails from the side panel.</summary>
        void clear_faces_found()
        {
            this.faces_found_panel.Controls.Clear();
            faces_count = 0;
            faces_panel_y = 0;
            faces_panel_x = 0;
        }

        /// <summary>
        /// Adds an 80x80 thumbnail plus a "name distance" label to the panel, two per
        /// row; the panel is cleared once it holds more than 10 controls.
        /// </summary>
        void add_face_found(Image<Gray, byte> img_found, string name_person, int match_value)
        {
            PictureBox pi = new PictureBox();
            pi.Location = new Point(faces_panel_x, faces_panel_y);
            pi.Height = 80;
            pi.Width = 80;
            pi.SizeMode = PictureBoxSizeMode.StretchImage;
            pi.Image = img_found.ToBitmap();

            Label lb = new Label();
            lb.Text = name_person + " " + match_value.ToString();
            lb.Location = new Point(faces_panel_x, faces_panel_y + 80);
            lb.Width = 80;
            lb.Height = 15;

            faces_found_panel.Controls.Add(pi);
            faces_found_panel.Controls.Add(lb);
            faces_count++;

            if (faces_count == 2)
            {
                faces_panel_x = 0;
                faces_panel_y += 100;
                faces_count = 0;
            }
            else faces_panel_x += 85;

            if (faces_found_panel.Controls.Count > 10)
            {
                clear_faces_found();
            }
        }
        #endregion

        #region Training

        #region Training variables
        //****************************** recognition global variables ****************************//

        Image<Gray, byte> gray_frame = null;

        // Haar cascade used for both live detection and still-photo training.
        CascadeClassifier face = new CascadeClassifier("haarcascade_frontalface_default.xml");

        // JPEG saving.
        List<Image<Gray, byte>> imagesToWrite = new List<Image<Gray, byte>>();
        EncoderParameters enc_parameters = new EncoderParameters(1);
        EncoderParameter enc = new EncoderParameter(System.Drawing.Imaging.Encoder.Quality, 100);
        ImageCodecInfo image_encoder_jpg;

        // XML label-file data.
        List<string> namesToWrite = new List<string>();
        List<string> namesForFile = new List<string>();
        XmlDocument docu = new XmlDocument();

        Form1 parent;
        private Image<Bgr, byte> img;

        // For acquiring 10 images in a row (not currently wired up).
        List<Image<Gray, byte>> resultImages = new List<Image<Gray, byte>>();
        int results_list_pos = 0;
        int num_faces_to_aquire = 10;
        bool record = false;

        //****************************************************************************************//
        #endregion

        /// <summary>
        /// Detects faces on the still photo loaded into photopicturebox, shows the
        /// last crop in facepicturebox, and outlines all detections on the photo.
        /// FIX(review): the scraped original had lost the loop variable
        /// ("(int = 0; &lt; facesdetected.length; i++)"); restored as a normal for loop.
        /// </summary>
        void PhotoGrabber(object sender, EventArgs e)
        {
            // Convert the master image to a bitmap, then to an Emgu image.
            Bitmap masterImage = (Bitmap)photopicturebox.Image;
            img = new Image<Bgr, byte>(masterImage);
            if (img == null) return;

            gray_frame = img.Convert<Gray, byte>();
            Rectangle[] facesDetected = face.DetectMultiScale(gray_frame, 1.2, 10, new Size(50, 50), Size.Empty);

            for (int i = 0; i < facesDetected.Length; i++)
            {
                // Focus in on the face: Haar results are not perfect, so trim the
                // rectangle to remove the majority of background noise.
                facesDetected[i].X += (int)(facesDetected[i].Height * 0.15);
                facesDetected[i].Y += (int)(facesDetected[i].Width * 0.22);
                facesDetected[i].Height -= (int)(facesDetected[i].Height * 0.3);
                facesDetected[i].Width -= (int)(facesDetected[i].Width * 0.35);

                result = img.Copy(facesDetected[i]).Convert<Gray, byte>()
                            .Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
                result._EqualizeHist();
                facepicturebox.Image = result.ToBitmap();

                // Outline the detected face in blue.
                img.Draw(facesDetected[i], new Bgr(Color.Blue), 2);
            }
            photopicturebox.Image = img.ToBitmap();
        }

        /// <summary>
        /// Saves the cropped face as TrainedFaces\face_{name}_{random}.jpg and appends
        /// a &lt;face&gt;&lt;name/&gt;&lt;file/&gt; entry to TrainedLabels.xml,
        /// creating the folder and label file on first use.
        /// </summary>
        /// <returns>true on success, false on any failure.</returns>
        private bool save_training_data(Image face_data)
        {
            try
            {
                string folder = Path.Combine(Application.StartupPath, "TrainedFaces");
                Directory.CreateDirectory(folder); // no-op when it already exists

                // Pick a random file name that is not already taken.
                Random rand = new Random();
                string facename = "face_" + nametextbox.Text + "_" + rand.Next().ToString() + ".jpg";
                while (File.Exists(Path.Combine(folder, facename)))
                {
                    facename = "face_" + nametextbox.Text + "_" + rand.Next().ToString() + ".jpg";
                }
                face_data.Save(Path.Combine(folder, facename), ImageFormat.Jpeg);

                string labelsFile = Path.Combine(folder, "TrainedLabels.xml");
                if (File.Exists(labelsFile))
                {
                    // Retry the load briefly — the file may still be locked by a
                    // previous save.
                    bool loading = true;
                    while (loading)
                    {
                        try
                        {
                            docu.Load(labelsFile);
                            loading = false;
                        }
                        catch
                        {
                            docu = new XmlDocument();
                            Thread.Sleep(10);
                        }
                    }

                    // Append <face><name/><file/></face> under the root element.
                    XmlElement root = docu.DocumentElement;
                    XmlElement face_d = docu.CreateElement("face");
                    XmlElement name_d = docu.CreateElement("name");
                    XmlElement file_d = docu.CreateElement("file");

                    name_d.InnerText = nametextbox.Text;
                    file_d.InnerText = facename;

                    face_d.AppendChild(name_d);
                    face_d.AppendChild(file_d);
                    root.AppendChild(face_d);

                    docu.Save(labelsFile);
                }
                else
                {
                    // First save: write a fresh label file with a single entry.
                    using (FileStream fs_face = File.OpenWrite(labelsFile))
                    using (XmlWriter writer = XmlWriter.Create(fs_face))
                    {
                        writer.WriteStartDocument();
                        writer.WriteStartElement("faces_for_training");

                        writer.WriteStartElement("face");
                        writer.WriteElementString("name", nametextbox.Text);
                        writer.WriteElementString("file", facename);
                        writer.WriteEndElement();

                        writer.WriteEndElement();
                        writer.WriteEndDocument();
                    }
                }

                return true;
            }
            catch (Exception)
            {
                // The caller surfaces the failure to the user via MessageBox.
                return false;
            }
        }

        /// <summary>Deletes all training data by recreating the TrainedFaces folder.</summary>
        private void deletebutton_click(object sender, EventArgs e)
        {
            string folder = Path.Combine(Application.StartupPath, "TrainedFaces");
            if (Directory.Exists(folder))
            {
                Directory.Delete(folder, true);
                Directory.CreateDirectory(folder);
            }
        }

        /// <summary>Saves the currently cropped face under the typed name.</summary>
        private void addbutton_click(object sender, EventArgs e)
        {
            if (!save_training_data(facepicturebox.Image))
                MessageBox.Show("Error", "Error in saving file info. Training data not saved", MessageBoxButtons.OK, MessageBoxIcon.Error);
        }

        /// <summary>
        /// Finds the codec for the given image format.
        /// FIX(review): the original queried GetImageDecoders() even though this codec
        /// is used for ENCODING JPEGs; GetImageEncoders() is the correct call.
        /// </summary>
        private ImageCodecInfo GetEncoder(ImageFormat format)
        {
            foreach (ImageCodecInfo codec in ImageCodecInfo.GetImageEncoders())
            {
                if (codec.FormatID == format.Guid)
                {
                    return codec;
                }
            }
            return null;
        }

        #endregion
    }
}

And this is the face-training class:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

using Emgu.CV.UI;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.CvEnum;
using Emgu.CV.Face;

using System.IO;
using System.Xml;
using System.Runtime.InteropServices;
using System.Threading;
using System.Windows.Forms;
using System.Xml.Serialization;
using System.Drawing.Imaging;
using System.Drawing;

/// <summary>
/// Wraps an Emgu FaceRecognizer (Eigen / Fisher / LBPH, selected via
/// Recognizer_Type) trained from the TrainedFaces folder, keeping the recogniser
/// code out of the main form.
/// </summary>
class Classifier_Train : IDisposable
{
    #region Variables

    // The active recogniser; concrete type depends on Recognizer_Type.
    FaceRecognizer recognizer;

    // Training data.  Each image gets its own numeric label, so a prediction maps
    // back to one file/name pair rather than grouping images per person.
    List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>(); // face crops
    List<string> namesList = new List<string>();   // label index -> person name
    List<int> namesListId = new List<int>();       // numeric label per image (0, 1, 2, ...)
    int contTrain, numLabels;
    float eigenDistance = 0;
    string eigenLabel;
    // Post-prediction acceptance threshold for the Eigen recogniser (distance units).
    int eigenThreshold = 2000;

    string error;
    bool _isTrained = false;

    // One of: "EMGU.CV.EigenFaceRecognizer", "EMGU.CV.FisherFaceRecognizer",
    // "EMGU.CV.LBPHFaceRecognizer".
    public string Recognizer_Type = "EMGU.CV.EigenFaceRecognizer";
    #endregion

    #region Constructors
    /// <summary>
    /// Default constructor: looks in (Application.StartupPath + "\TrainedFaces")
    /// for training data.
    /// </summary>
    public Classifier_Train()
    {
        _isTrained = LoadTrainingData(Path.Combine(Application.StartupPath, "TrainedFaces"));
    }

    /// <summary>Takes a string input for a different training-data location.</summary>
    /// <param name="Training_Folder">folder containing TrainedLabels.xml</param>
    public Classifier_Train(string Training_Folder)
    {
        _isTrained = LoadTrainingData(Training_Folder);
    }
    #endregion

    #region Public
    /// <summary>Retrains the recogniser from the default folder without resetting
    /// Recognizer_Type.</summary>
    /// <returns>true if training succeeded.</returns>
    public bool Retrain()
    {
        return _isTrained = LoadTrainingData(Path.Combine(Application.StartupPath, "TrainedFaces"));
    }

    /// <summary>Retrains the recogniser from the given folder without resetting
    /// Recognizer_Type.</summary>
    /// <returns>true if training succeeded.</returns>
    public bool Retrain(string Training_Folder)
    {
        return _isTrained = LoadTrainingData(Training_Folder);
    }

    /// <summary>
    /// <para>true: training data was located and the recogniser was trained.</para>
    /// <para>false: no training data was found or an error occurred in training.</para>
    /// </summary>
    public bool IsTrained
    {
        get { return _isTrained; }
    }

    /// <summary>
    /// Recognises a 100x100 grayscale face using the trained recogniser.
    /// </summary>
    /// <param name="Input_image">equalised grayscale face crop</param>
    /// <param name="Eigen_Thresh">optional override for the eigen threshold</param>
    /// <returns>the matched name, "Unknown", or "" when untrained</returns>
    public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
    {
        if (!_isTrained) return "";

        FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);

        if (ER.Label == -1)
        {
            eigenLabel = "Unknown";
            eigenDistance = 0;
            return eigenLabel;
        }

        eigenLabel = namesList[ER.Label];
        eigenDistance = (float)ER.Distance;
        if (Eigen_Thresh > -1) eigenThreshold = Eigen_Thresh;

        // Only apply a post-prediction threshold for the Eigen recogniser: it is
        // constructed with threshold = +infinity, whereas Fisher and LBPH have their
        // thresholds applied inside Predict (set in their constructors).
        switch (Recognizer_Type)
        {
            case ("EMGU.CV.EigenFaceRecognizer"):
                // FIX(review): FaceRecognizer.Predict returns a DISTANCE — smaller
                // means a closer match.  The original test was inverted
                // (distance > threshold => accept), so only POOR matches were ever
                // labelled, which explains the frequent misrecognitions reported.
                if (eigenDistance <= eigenThreshold) return eigenLabel;
                else return "Unknown";
            case ("EMGU.CV.FisherFaceRecognizer"):
            case ("EMGU.CV.LBPHFaceRecognizer"):
            default:
                // Threshold set in training controls unknowns for these types.
                return eigenLabel;
        }
    }

    /// <summary>Returns the name recognised by the last Recognise() call.</summary>
    public string Get_Eigen_Label
    {
        get { return eigenLabel; }
    }

    /// <summary>Returns the distance of the last prediction — useful for spotting
    /// potential false classifications (smaller = more confident).</summary>
    public float Get_Eigen_Distance
    {
        get { return eigenDistance; }
    }

    /// <summary>Returns a string containing any error that has occurred.</summary>
    public string Get_Error
    {
        get { return error; }
    }

    /// <summary>Dispose of the recogniser and training data.</summary>
    public void Dispose()
    {
        // FIX(review): actually dispose the native recogniser instead of only
        // dropping the reference.
        if (recognizer != null) recognizer.Dispose();
        recognizer = null;
        trainingImages = null;
        namesList = null;
        error = null;
        GC.Collect();
    }

    #endregion

    #region Private
    /// <summary>
    /// Loads training data (TrainedLabels.xml plus the face images it lists) from
    /// the given folder and trains the recogniser selected by Recognizer_Type.
    /// </summary>
    /// <param name="Folder_location">folder containing TrainedLabels.xml</param>
    /// <returns>true if at least one image was loaded and training succeeded.</returns>
    private bool LoadTrainingData(string Folder_location)
    {
        string labelsFile = Path.Combine(Folder_location, "TrainedLabels.xml");
        if (!File.Exists(labelsFile)) return false;

        try
        {
            namesList.Clear();
            namesListId.Clear();
            trainingImages.Clear();

            // FIX(review): the original called FileStream.Read once and ignored its
            // return value (a single Read may return fewer bytes than requested) and
            // never disposed the MemoryStream.  ReadAllBytes + using blocks fix both.
            byte[] xmlBytes = File.ReadAllBytes(labelsFile);

            using (MemoryStream xmlStream = new MemoryStream(xmlBytes))
            using (XmlReader xmlReader = XmlReader.Create(xmlStream))
            {
                while (xmlReader.Read())
                {
                    if (!xmlReader.IsStartElement()) continue;
                    switch (xmlReader.Name)
                    {
                        case "name":
                            if (xmlReader.Read())
                            {
                                namesListId.Add(namesList.Count); // 0, 1, 2, 3...
                                namesList.Add(xmlReader.Value.Trim());
                                numLabels += 1;
                            }
                            break;
                        case "file":
                            if (xmlReader.Read())
                            {
                                // NOTE(review): images are always loaded relative to
                                // the startup path, so this breaks if the training
                                // folder is moved — mirrors the original behaviour.
                                trainingImages.Add(new Image<Gray, byte>(
                                    Path.Combine(Application.StartupPath, "TrainedFaces", xmlReader.Value.Trim())));
                            }
                            break;
                    }
                }
            }
            contTrain = numLabels;

            if (trainingImages.Count == 0) return false;

            // Recogniser selection.  Distance semantics (OpenCV face module):
            // Predict returns the label of the closest match and its distance;
            // SMALLER distance = better match.  Fisher/LBPH reject matches whose
            // distance exceeds the constructor threshold; Eigen is built with an
            // infinite threshold and filtered afterwards in Recognise().
            switch (Recognizer_Type)
            {
                case ("EMGU.CV.LBPHFaceRecognizer"):
                    // radius, neighbours, gridX, gridY, threshold
                    recognizer = new LBPHFaceRecognizer(1, 8, 8, 8, 100);
                    break;
                case ("EMGU.CV.FisherFaceRecognizer"):
                    // components (0 = auto), threshold
                    recognizer = new FisherFaceRecognizer(0, 3500);
                    break;
                case ("EMGU.CV.EigenFaceRecognizer"):
                default:
                    // 80 principal components is usually sufficient; threshold is
                    // deliberately infinite — Recognise() applies eigenThreshold.
                    recognizer = new EigenFaceRecognizer(80, double.PositiveInfinity);
                    break;
            }

            recognizer.Train(trainingImages.ToArray(), namesListId.ToArray());
            return true;
        }
        catch (Exception ex)
        {
            error = ex.ToString(); // retrievable via Get_Error
            return false;
        }
    }

    #endregion
}


Comments

Popular posts from this blog

Is there a better way to structure post methods in Class Based Views -

performance - Why is XCHG reg, reg a 3 micro-op instruction on modern Intel architectures? -

jquery - Responsive Navbar with Sub Navbar -