scikit-image
926 lines · 32.1 KB
1# cython: cdivision=True
2# cython: boundscheck=False
3# cython: nonecheck=False
4# cython: wraparound=False
5
6
7import numpy as np
8cimport numpy as cnp
9from . cimport safe_openmp as openmp
10from .safe_openmp cimport have_openmp
11from libc.stdlib cimport malloc, free
12from libcpp.vector cimport vector
13
14from skimage._shared.interpolation cimport round, fmax, fmin
15
16from cython.parallel import prange
17from ..color import rgb2gray
18from ..transform import integral_image
19import xml.etree.ElementTree as ET
20from ._texture cimport _multiblock_lbp
21import math
22
23cnp.import_array()
24
# Struct for storing a single detection.
cdef struct Detection:

    # Row and column of the top-left corner of the detection window,
    # plus the window's width and height, all in pixels.
    int r
    int c
    int width
    int height
32
33
# Struct for storing cluster of rectangles that represent detections.
# As the rectangles are dynamically added, the sum of row, col positions,
# width and heights are stored with the count of rectangles that belong
# to this cluster. This way, we don't have to store all the rectangles
# information as array and the average of all detections in a cluster
# can be easily computed in a constant time.
cdef struct DetectionsCluster:

    # Running sums over the member rectangles; dividing any sum by
    # ``count`` yields the corresponding mean value.
    int r_sum
    int c_sum
    int width_sum
    int height_sum
    int count
47
48
# Struct for storing multi-block binary pattern position.
# Defines the parameters of multi-block binary pattern feature.
# Read more in skimage.feature.texture.multiblock_lbp.
cdef struct MBLBP:

    # Top-left corner (r, c) and size of ONE of the 9 equal blocks of the
    # MB-LBP feature, relative to the detection window's top-left corner.
    Py_ssize_t r
    Py_ssize_t c
    Py_ssize_t width
    Py_ssize_t height
58
59
# Struct for storing information about trained MBLBP feature.
# Feature_id contains an index to array where the parameters of MBLBP features
# are stored using MBLBP struct. Index is used because some stages in cascade
# can have repeating features. The lut_idx contains an index to a look-up table
# which gives, depending on the computed value of a feature, an answer whether
# an object is present in the current detection window. Based on the value of
# look-up table (0 or 1) positive(right) or negative(left) weight is added to
# the overall score of a stage.
cdef struct MBLBPStump:

    Py_ssize_t feature_id
    Py_ssize_t lut_idx
    cnp.float32_t left
    cnp.float32_t right
74
75
# Struct for storing a stage of classifier which itself consists of
# MBLBPStumps. It has the index that maps to the starting stump and amount of
# stumps that belong to a stage after this index. In each stage all the stumps
# are evaluated and their output values( `left` or `right` depending on the
# input) are summed up and compared to the threshold. If the value is higher
# than the threshold, the stage is passed and Cascade classifier goes to the
# next stage. If all the stages are passed, the object is predicted to be
# present in the input image patch.
cdef struct Stage:

    # Index of the stage's first stump in the global stumps array,
    # number of stumps in the stage, and the pass/fail score threshold.
    Py_ssize_t first_idx
    Py_ssize_t amount
    cnp.float32_t threshold
89
90
cdef vector[Detection] _group_detections(vector[Detection] detections,
                                         cnp.float32_t intersection_score_threshold=0.5,
                                         int min_neighbor_number=4):
    """Group similar detections into a single detection and eliminate weak
    (non-overlapping) detections.

    We assume that a true detection is characterized by a high number of
    overlapping detections. Such detections are isolated and gathered into
    one cluster. The average of each cluster is returned. Averaging means
    that the row and column positions of top left corners and the width
    and height parameters of each rectangle in a cluster are used to compute
    values of average rectangle that will represent cluster.

    Note that the clustering is greedy and order-dependent: each detection
    is compared against the running means of the existing clusters and
    merged into the best-scoring one (or starts a new cluster).

    Parameters
    ----------
    detections : vector[Detection]
        A cluster of detections.
    min_neighbor_number : int
        Minimum amount of intersecting detections in order for detection
        to be approved by the function.
    intersection_score_threshold : cnp.float32_t
        The minimum value of value of ratio
        (intersection area) / (small rectangle ratio) in order to merge
        two rectangles into one cluster.

    Returns
    -------
    output : vector[Detection]
        The grouped detections.
    """

    cdef:
        Detection mean_detection
        vector[DetectionsCluster] clusters
        Py_ssize_t nr_of_clusters
        Py_ssize_t current_detection_nr
        Py_ssize_t current_cluster_nr
        Py_ssize_t nr_of_detections = detections.size()
        Py_ssize_t best_cluster_nr
        bint new_cluster
        cnp.float32_t best_score
        cnp.float32_t intersection_score

    # Check if detections array is not empty.
    # Push first detection as first cluster.
    if nr_of_detections:
        clusters.push_back(cluster_from_detection(detections[0]))

    for current_detection_nr in range(1, nr_of_detections):

        # A cluster only wins the detection if it beats the threshold.
        best_score = intersection_score_threshold
        best_cluster_nr = 0
        new_cluster = True

        nr_of_clusters = clusters.size()

        for current_cluster_nr in range(nr_of_clusters):

            mean_detection = mean_detection_from_cluster(
                                 clusters[current_cluster_nr])

            intersection_score = rect_intersection_score(
                                    detections[current_detection_nr],
                                    mean_detection)

            if intersection_score > best_score:

                new_cluster = False
                best_cluster_nr = current_cluster_nr
                best_score = intersection_score

        if new_cluster:

            clusters.push_back(cluster_from_detection(
                                   detections[current_detection_nr]))
        else:

            clusters[best_cluster_nr] = update_cluster(
                                            clusters[best_cluster_nr],
                                            detections[current_detection_nr])

    # Drop weakly supported clusters, then collapse each to its mean.
    clusters = threshold_clusters(clusters, min_neighbor_number)
    return get_mean_detections(clusters)
174
175
cdef DetectionsCluster update_cluster(DetectionsCluster cluster,
                                      Detection detection):
    """Return a copy of ``cluster`` with ``detection`` folded in.

    The detection's position and size are added to the cluster's running
    sums so that it contributes to the cluster's mean rectangle.

    Parameters
    ----------
    cluster : DetectionsCluster
        A cluster of detections.
    detection : Detection
        The detection to be added to cluster.

    Returns
    -------
    updated_cluster : DetectionsCluster
        The updated cluster.
    """

    # Structs are passed by value, so this copy leaves the caller's
    # argument untouched.
    cdef DetectionsCluster result = cluster

    result.count += 1
    result.r_sum += detection.r
    result.c_sum += detection.c
    result.height_sum += detection.height
    result.width_sum += detection.width

    return result
205
206
cdef Detection mean_detection_from_cluster(DetectionsCluster cluster):
    """Collapse a cluster into its mean detection rectangle.

    Each field of the result is the cluster's corresponding running sum
    divided by the number of member rectangles.

    Parameters
    ----------
    cluster : DetectionsCluster
        A cluster of detections.

    Returns
    -------
    mean : Detection
        The mean detection.
    """

    cdef:
        Detection mean
        int n = cluster.count

    # NOTE: with `cdivision=True` these are truncating C integer divisions.
    mean.r = cluster.r_sum / n
    mean.c = cluster.c_sum / n
    mean.width = cluster.width_sum / n
    mean.height = cluster.height_sum / n

    return mean
232
233
cdef DetectionsCluster cluster_from_detection(Detection detection):
    """Seed a new cluster from a single detection.

    The resulting cluster has ``count`` one and running sums equal to the
    detection's own position and size.

    Parameters
    ----------
    detection : Detection
        A single detection.

    Returns
    -------
    new_cluster : DetectionsCluster
        The cluster struct that was created from detection.
    """

    cdef DetectionsCluster seeded

    seeded.count = 1
    seeded.r_sum = detection.r
    seeded.c_sum = detection.c
    seeded.height_sum = detection.height
    seeded.width_sum = detection.width

    return seeded
259
260
cdef vector[DetectionsCluster] threshold_clusters(vector[DetectionsCluster] clusters,
                                                  int count_threshold):
    """Keep only clusters supported by enough rectangles.

    A cluster survives when its rectangle count is greater than or equal
    to ``count_threshold``; all others are discarded.

    Parameters
    ----------
    clusters : vector[DetectionsCluster]
        Array of rectangles clusters.
    count_threshold : int
        The threshold number of rectangles that is used.

    Returns
    -------
    output : vector[DetectionsCluster]
        The array of clusters that satisfy the threshold criteria.
    """

    cdef:
        vector[DetectionsCluster] kept
        Py_ssize_t idx
        Py_ssize_t n_clusters = clusters.size()

    for idx in range(n_clusters):
        if clusters[idx].count >= count_threshold:
            kept.push_back(clusters[idx])

    return kept
294
295
cdef vector[Detection] get_mean_detections(vector[DetectionsCluster] clusters):
    """Replace every cluster with its mean detection.

    Produces one detection per input cluster, each computed by
    :func:`mean_detection_from_cluster`.

    Parameters
    ----------
    clusters : vector[DetectionsCluster]
        Array of rectangles clusters.

    Returns
    -------
    detections : vector[Detection]
        The array of mean detections. Each detection represent mean
        for one cluster.
    """

    cdef:
        Py_ssize_t idx
        Py_ssize_t n_clusters = clusters.size()
        vector[Detection] means

    # Pre-size the output, then fill slot-by-slot.
    means.resize(n_clusters)

    for idx in range(n_clusters):
        means[idx] = mean_detection_from_cluster(clusters[idx])

    return means
326
327
cdef cnp.float32_t rect_intersection_area(Detection rect_a, Detection rect_b):
    """Compute the area of the intersection of two rectangles.

    The overlap extent is measured independently along rows and columns;
    either extent clamps to zero when the rectangles do not overlap on
    that axis, making the product zero for disjoint rectangles.

    Parameters
    ----------
    rect_a : Detection
        Struct of the first rectangle.
    rect_b : Detection
        Struct of the second rectangle.

    Returns
    -------
    result : cnp.float32_t
        The intersection area.
    """

    cdef:
        # Bottom/right edges of each rectangle (exclusive).
        Py_ssize_t a_bottom = rect_a.r + rect_a.height
        Py_ssize_t a_right = rect_a.c + rect_a.width
        Py_ssize_t b_bottom = rect_b.r + rect_b.height
        Py_ssize_t b_right = rect_b.c + rect_b.width
        cnp.float32_t overlap_cols
        cnp.float32_t overlap_rows

    overlap_cols = fmax(0, fmin(a_right, b_right) - fmax(rect_a.c, rect_b.c))
    overlap_rows = fmax(0, fmin(a_bottom, b_bottom) - fmax(rect_a.r, rect_b.r))

    return overlap_cols * overlap_rows
358
359
cdef cnp.float32_t rect_intersection_score(Detection rect_a, Detection rect_b):
    """Compute the intersection score of two rectangles.

    The score is the intersection area divided by the area of the smaller
    of the two rectangles.

    Parameters
    ----------
    rect_a : Detection
        Struct of the first rectangle.
    rect_b : Detection
        Struct of the second rectangle.

    Returns
    -------
    result : cnp.float32_t
        The intersection score. The number in the interval ``[0, 1]``.
        1 means rectangles fully intersect, 0 means they don't.
    """

    cdef:
        cnp.float32_t area_a = rect_a.height * rect_a.width
        cnp.float32_t area_b = rect_b.height * rect_b.width
        cnp.float32_t overlap = rect_intersection_area(rect_a, rect_b)
        # Normalize by the smaller rectangle so a window fully contained
        # in a bigger one scores 1.
        cnp.float32_t reference = fmin(area_a, area_b)

    return overlap / reference
391
392
cdef class Cascade:
    """Class for cascade of classifiers that is used for object detection.

    The main idea behind cascade of classifiers is to create classifiers
    of medium accuracy and ensemble them into one strong classifier
    instead of just creating a strong one. The second advantage of cascade
    classifier is that easy examples can be classified only by evaluating
    some of the classifiers in the cascade, making the process much faster
    than the process of evaluating a one strong classifier.

    Attributes
    ----------
    eps : cnp.float32_t
        Accuracy parameter. Increasing it, makes the classifier detect less
        false positives but at the same time the false negative score increases.
    stages_number : Py_ssize_t
        Amount of stages in a cascade. Each cascade consists of stumps i.e.
        trained features.
    stumps_number : Py_ssize_t
        The overall amount of stumps in all the stages of cascade.
    features_number : Py_ssize_t
        The overall amount of different features used by cascade.
        Two stumps can use the same features but has different trained
        values.
    window_width : Py_ssize_t
        The width of a detection window that is used. Objects smaller than
        this window can't be detected.
    window_height : Py_ssize_t
        The height of a detection window.
    stages : Stage*
        A pointer to the C array that stores stages information using a
        Stage struct.
    features : MBLBP*
        A pointer to the C array that stores MBLBP features using an MBLBP
        struct.
    LUTs : cnp.uint32_t*
        A pointer to the C array with look-up tables that are used by trained
        MBLBP features (MBLBPStumps) to evaluate a particular region.

    Notes
    -----
    The cascade approach was first described by Viola and Jones [1]_, [2]_,
    although these initial publications used a set of Haar-like features. This
    implementation instead uses multi-scale block local binary pattern (MB-LBP)
    features [3]_.

    References
    ----------
    .. [1] Viola, P. and Jones, M. "Rapid object detection using a boosted
           cascade of simple features," In: Proceedings of the 2001 IEEE
           Computer Society Conference on Computer Vision and Pattern
           Recognition. CVPR 2001, pp. I-I.
           :DOI:`10.1109/CVPR.2001.990517`
    .. [2] Viola, P. and Jones, M.J, "Robust Real-Time Face Detection",
           International Journal of Computer Vision 57, 137–154 (2004).
           :DOI:`10.1023/B:VISI.0000013087.49260.fb`
    .. [3] Liao, S. et al. Learning Multi-scale Block Local Binary Patterns for
           Face Recognition. International Conference on Biometrics (ICB),
           2007, pp. 828-837. In: Lecture Notes in Computer Science, vol 4642.
           Springer, Berlin, Heidelberg.
           :DOI:`10.1007/978-3-540-74549-5_87`
    """

    cdef:
        public cnp.float32_t eps
        public Py_ssize_t stages_number
        public Py_ssize_t stumps_number
        public Py_ssize_t features_number
        public Py_ssize_t window_width
        public Py_ssize_t window_height
        # Raw C arrays owned by this object; allocated in _load_xml and
        # released in __dealloc__.
        Stage* stages
        MBLBPStump* stumps
        MBLBP* features
        cnp.uint32_t* LUTs

    def __dealloc__(self):

        # Free the memory that was used for c-arrays.
        # free(NULL) is a no-op, so this is safe even if _load_xml never ran.
        free(self.stages)
        free(self.stumps)
        free(self.features)
        free(self.LUTs)

    def __init__(self, xml_file, eps=1e-5):
        """Initialize cascade classifier.

        Parameters
        ----------
        xml_file : file's path or file's object
            A file in a OpenCv format from which all the cascade classifier's
            parameters are loaded.
        eps : cnp.float32_t
            Accuracy parameter. Increasing it, makes the classifier
            detect less false positives but at the same time the false
            negative score increases.

        """

        self._load_xml(xml_file, eps)

    cdef bint classify(self, cnp.float32_t[:, ::1] int_img, Py_ssize_t row,
                       Py_ssize_t col, cnp.float32_t scale) noexcept nogil:
        """Classify the provided image patch i.e. check if the classifier
        detects an object in the given image patch.

        The function takes the original window size that is stored in the
        trained file, scales it and places in the specified part of the
        provided image, carries out classification and gives a binary result.

        Parameters
        ----------
        int_img : cnp.float32_t[:, ::1]
            Memory-view to integral image.
        row : Py_ssize_t
            Row coordinate of the rectangle in the given image to classify.
            Top left corner of window.
        col : Py_ssize_t
            Column coordinate of the rectangle in the given image to classify.
            Top left corner of window.
        scale : cnp.float32_t
            The scale by which the search window is multiplied.
            After multiplication the result is rounded to the lowest integer.

        Returns
        -------
        result : int
            The binary output that takes only 0 or 1. Gives 1 if the classifier
            detects the object in specified region and 0 otherwise.
        """

        cdef:
            cnp.float32_t stage_points
            int lbp_code
            int bit
            Py_ssize_t stage_number
            Py_ssize_t weak_classifier_number
            Py_ssize_t first_stump_idx
            Py_ssize_t lut_idx
            Py_ssize_t r, c, width, height
            Stage current_stage
            MBLBPStump current_stump
            MBLBP current_feature


        for stage_number in range(self.stages_number):

            current_stage = self.stages[stage_number]
            first_stump_idx = current_stage.first_idx
            stage_points = 0

            for weak_classifier_number in range(current_stage.amount):

                current_stump = self.stumps[first_stump_idx +
                                            weak_classifier_number]

                current_feature = self.features[current_stump.feature_id]

                # Scale the feature's position and block size; the cast
                # truncates toward zero.
                r = <Py_ssize_t>(current_feature.r * scale)
                c = <Py_ssize_t>(current_feature.c * scale)
                width = <Py_ssize_t>(current_feature.width * scale)
                height = <Py_ssize_t>(current_feature.height * scale)


                lbp_code = _multiblock_lbp(int_img, row + r, col + c,
                                           width, height)

                lut_idx = current_stump.lut_idx

                # The 256-bit LUT is stored as 8 uint32 words: word index is
                # lbp_code >> 5, bit index within the word is lbp_code & 31.
                bit = (self.LUTs[lut_idx + (lbp_code >> 5)] >> (lbp_code & 31)) & 1

                stage_points += current_stump.left if bit else current_stump.right

            # Early exit: failing any stage rejects the window immediately.
            if stage_points < (current_stage.threshold - self.eps):

                return False

        return True

    def _get_valid_scale_factors(self, min_size, max_size, scale_step):
        """Get the valid scale multipliers for the original window size.

        The function takes the minimal size of window and maximum size of
        window as interval and finds all the multipliers that will give the
        windows which sizes will be not less than the min_size and not bigger
        than the max_size.

        Parameters
        ----------
        min_size : tuple (int, int)
            Minimum size of window for which to search the scale factor.
        max_size : tuple (int, int)
            Maximum size of window for which to search the scale factor.
        scale_step : cnp.float32_t
            The scale by which the search window is multiplied
            on each iteration.

        Returns
        -------
        scale_factors : 1-D cnp.float32_ts ndarray
            The scale factors that give the window sizes that are in the
            specified interval after multiplying the search window.
        """

        current_size = np.array((self.window_height, self.window_width))
        min_size = np.array(min_size, dtype=np.float32)
        max_size = np.array(max_size, dtype=np.float32)

        # Work in exponent space: a window scaled by scale_step**p fits the
        # bound when p lies between the min and max powers computed below.
        row_power_max = math.log(max_size[0]/current_size[0], scale_step)
        col_power_max = math.log(max_size[1]/current_size[1], scale_step)

        row_power_min = math.log(min_size[0]/current_size[0], scale_step)
        col_power_min = math.log(min_size[1]/current_size[1], scale_step)

        # Clamp the lower bound at 0 so windows are never shrunk below the
        # trained detection-window size.
        mn = max(row_power_min, col_power_min, 0)
        mx = min(row_power_max, col_power_max)

        powers = np.arange(mn, mx)

        scale_factors = np.power(scale_step, powers, dtype=np.float32)

        return scale_factors

    def _get_contiguous_integral_image(self, img):
        """Get a c-contiguous array that represents the integral image.

        The function converts the input image into the integral image in
        a format that is suitable for work of internal functions of
        the cascade classifier class. The function converts the image
        to gray-scale float representation, computes the integral image
        and makes it c-contiguous.

        Parameters
        ----------
        img : 2-D or 3-D ndarray
            Ndarray that represents the input image.

        Returns
        -------
        int_img : 2-D floats ndarray
            C-contiguous integral image of the input image.
        """
        if len(img.shape) > 2:
            img = rgb2gray(img)
        int_img = integral_image(img)
        # float32 + C-contiguity match the memoryview type used by classify().
        int_img = np.ascontiguousarray(int_img, dtype=np.float32)

        return int_img


    def detect_multi_scale(self, img, cnp.float32_t scale_factor,
                           cnp.float32_t step_ratio, min_size, max_size,
                           min_neighbor_number=4,
                           intersection_score_threshold=0.5):
        """Search for the object on multiple scales of input image.

        The function takes the input image, the scale factor by which the
        searching window is multiplied on each step, minimum window size
        and maximum window size that specify the interval for the search
        windows that are applied to the input image to detect objects.

        Parameters
        ----------
        img : 2-D or 3-D ndarray
            Ndarray that represents the input image.
        scale_factor : cnp.float32_t
            The scale by which searching window is multiplied on each step.
        step_ratio : cnp.float32_t
            The ratio by which the search step in multiplied on each scale
            of the image. 1 represents the exhaustive search and usually is
            slow. By setting this parameter to higher values the results will
            be worse but the computation will be much faster. Usually, values
            in the interval [1, 1.5] give good results.
        min_size : tuple (int, int)
            Minimum size of the search window.
        max_size : tuple (int, int)
            Maximum size of the search window.
        min_neighbor_number : int
            Minimum amount of intersecting detections in order for detection
            to be approved by the function.
        intersection_score_threshold : cnp.float32_t
            The minimum value of value of ratio
            (intersection area) / (small rectangle ratio) in order to merge
            two detections into one.

        Returns
        -------
        output : list of dicts
            Dict have form {'r': int, 'c': int, 'width': int, 'height': int},
            where 'r' represents row position of top left corner of detected
            window, 'c' - col position, 'width' - width of detected window,
            'height' - height of detected window.
        """

        cdef:
            Py_ssize_t max_row
            Py_ssize_t max_col
            Py_ssize_t current_height
            Py_ssize_t current_width
            Py_ssize_t current_row
            Py_ssize_t current_col
            Py_ssize_t current_step
            Py_ssize_t number_of_scales
            Py_ssize_t img_height
            Py_ssize_t img_width
            Py_ssize_t scale_number
            Py_ssize_t window_height = self.window_height
            Py_ssize_t window_width = self.window_width
            int result
            cnp.float32_t[::1] scale_factors
            cnp.float32_t[:, ::1] int_img
            cnp.float32_t current_scale_factor
            vector[Detection] output
            Detection new_detection

        int_img = self._get_contiguous_integral_image(img)
        img_height = int_img.shape[0]
        img_width = int_img.shape[1]

        scale_factors = self._get_valid_scale_factors(min_size,
                                                      max_size, scale_factor)
        number_of_scales = scale_factors.shape[0]

        # Initialize lock to enable thread-safe writes to the array
        # in concurrent loop.
        cdef openmp.omp_lock_t mylock

        if have_openmp:
            openmp.omp_init_lock(&mylock)


        # As the amount of work between the threads is not equal we
        # use `dynamic` schedule which enables them to use computing
        # power on demand.
        # Variables assigned inside a prange body are thread-private.
        for scale_number in prange(0, number_of_scales,
                                   schedule='dynamic', nogil=True):

            current_scale_factor = scale_factors[scale_number]
            current_step = <Py_ssize_t>round(current_scale_factor * step_ratio)
            current_height = <Py_ssize_t>(window_height * current_scale_factor)
            current_width = <Py_ssize_t>(window_width * current_scale_factor)
            max_row = img_height - current_height
            max_col = img_width - current_width

            # Check if scaled detection window fits in image.
            if (max_row < 0) or (max_col < 0):
                continue

            current_row = 0
            current_col = 0

            # Slide the window over the image with the scale-dependent step.
            while current_row < max_row:
                while current_col < max_col:

                    result = self.classify(int_img, current_row,
                                           current_col,
                                           scale_factors[scale_number])

                    if result:

                        new_detection.r = current_row
                        new_detection.c = current_col
                        new_detection.width = current_width
                        new_detection.height = current_height

                        # Guard the shared output vector across threads.
                        if have_openmp:
                            openmp.omp_set_lock(&mylock)

                        output.push_back(new_detection)

                        if have_openmp:
                            openmp.omp_unset_lock(&mylock)

                    current_col = current_col + current_step

                current_row = current_row + current_step
                current_col = 0

        if have_openmp:
            openmp.omp_destroy_lock(&mylock)

        return list(_group_detections(output, intersection_score_threshold,
                                      min_neighbor_number))

    def _load_xml(self, xml_file, eps=1e-5):
        """Load the parameters of cascade classifier into the class.

        The function takes the file with the parameters that represent
        trained cascade classifier and loads them into class for later
        use.

        Parameters
        ----------
        xml_file : filename or file object
            File that contains the cascade classifier.
        eps : cnp.float32_t
            Accuracy parameter. Increasing it, makes the classifier
            detect less false positives but at the same time the false
            negative score increases.

        """

        cdef:
            Stage* stages_carr
            MBLBPStump* stumps_carr
            MBLBP* features_carr
            cnp.uint32_t* LUTs_carr

            cnp.float32_t stage_threshold

            Py_ssize_t stage_number
            Py_ssize_t stages_number
            Py_ssize_t window_height
            Py_ssize_t window_width

            Py_ssize_t weak_classifiers_amount
            Py_ssize_t weak_classifier_number

            Py_ssize_t feature_number
            Py_ssize_t features_number
            Py_ssize_t stump_lut_idx
            Py_ssize_t stump_idx
            Py_ssize_t i

            cnp.uint32_t[::1] lut

            MBLBP new_feature
            MBLBPStump new_stump
            Stage new_stage

        tree = ET.parse(xml_file)

        # Load entities.
        features = tree.find('.//features')
        stages = tree.find('.//stages')

        # Get the respective amounts.
        stages_number = int(tree.find('.//stageNum').text)
        window_height = int(tree.find('.//height').text)
        window_width = int(tree.find('.//width').text)
        features_number = len(features)

        # Count the stumps.
        stumps_number = 0
        for stage_number in range(stages_number):
            current_stage = stages[stage_number]
            weak_classifiers_amount = int(current_stage.find('maxWeakCount').text)
            stumps_number += weak_classifiers_amount

        # Allocate memory for data.
        features_carr = <MBLBP*>malloc(features_number * sizeof(MBLBP))
        stumps_carr = <MBLBPStump*>malloc(stumps_number * sizeof(MBLBPStump))
        stages_carr = <Stage*>malloc(stages_number*sizeof(Stage))
        # Each look-up table consists of 8 u-int numbers.
        LUTs_carr = <cnp.uint32_t*>malloc(8 * stumps_number *
                                          sizeof(cnp.uint32_t))

        # Check if memory was allocated.
        if not (features_carr and stumps_carr and stages_carr and LUTs_carr):
            free(features_carr)
            free(stumps_carr)
            free(stages_carr)
            free(LUTs_carr)
            raise MemoryError("Failed to allocate memory while parsing XML.")

        # Parse and load features in memory.
        for feature_number in range(features_number):
            params = features[feature_number][0].text.split()
            # list() is for Python3 fix here
            params = list(map(lambda x: int(x), params))
            # NOTE: the XML stores (col, row, width, height); the struct
            # takes (row, col, width, height), hence params[1], params[0].
            new_feature = MBLBP(params[1], params[0], params[2], params[3])
            features_carr[feature_number] = new_feature

        stump_lut_idx = 0
        stump_idx = 0

        # Parse and load stumps, stages.
        for stage_number in range(stages_number):

            current_stage = stages[stage_number]

            # Parse and load current stage.
            stage_threshold = float(current_stage.find('stageThreshold').text)
            weak_classifiers_amount = int(current_stage.find('maxWeakCount').text)
            new_stage = Stage(stump_idx, weak_classifiers_amount,
                              stage_threshold)
            stages_carr[stage_number] = new_stage

            weak_classifiers = current_stage.find('weakClassifiers')

            for weak_classifier_number in range(weak_classifiers_amount):

                current_weak_classifier = weak_classifiers[weak_classifier_number]

                # Stump's leaf values. First negative if image is probably not
                # a face. Second positive if image is probably a face.
                leaf_values = current_weak_classifier.find('leafValues').text
                # list() is for Python3 fix here
                leaf_values = list(map(lambda x: float(x), leaf_values.split()))

                # Extract the elements only starting from second.
                # First two are useless
                internal_nodes = current_weak_classifier.find('internalNodes')
                internal_nodes = internal_nodes.text.split()[2:]

                # Extract the feature number and respective parameters.
                # The MBLBP position and size.
                feature_number = int(internal_nodes[0])
                # list() is for Python3 fix here
                lut_array = list(map(lambda x: int(x), internal_nodes[1:]))
                # Cast via astype to avoid warning about integer wraparound.
                # see: https://github.com/scikit-image/scikit-image/issues/6638
                lut = np.asarray(lut_array).astype(np.uint32)

                # Copy array to the main LUT array
                for i in range(8):
                    LUTs_carr[stump_lut_idx + i] = lut[i]

                new_stump = MBLBPStump(feature_number, stump_lut_idx,
                                       leaf_values[0], leaf_values[1])
                stumps_carr[stump_idx] = new_stump

                stump_lut_idx += 8
                stump_idx += 1

        # Publish the parsed data onto the instance; from here on
        # __dealloc__ owns the allocated arrays.
        self.eps = eps
        self.window_height = window_height
        self.window_width = window_width
        self.features = features_carr
        self.stumps = stumps_carr
        self.stages = stages_carr
        self.LUTs = LUTs_carr
        self.stages_number = stages_number
        self.features_number = features_number
        self.stumps_number = stumps_number
927