scikit-image
1355 lines · 44.5 KB
1import functools
2import math
3from itertools import combinations_with_replacement
4
5import numpy as np
6from scipy import ndimage as ndi
7from scipy import spatial, stats
8
9from .._shared.filters import gaussian
10from .._shared.utils import _supported_float_type, safe_as_int, warn
11from ..transform import integral_image
12from ..util import img_as_float
13from ._hessian_det_appx import _hessian_matrix_det
14from .corner_cy import _corner_fast, _corner_moravec, _corner_orientations
15from .peak import peak_local_max
16from .util import _prepare_grayscale_input_2D, _prepare_grayscale_input_nD
17
18
19def _compute_derivatives(image, mode='constant', cval=0):
20"""Compute derivatives in axis directions using the Sobel operator.
21
22Parameters
23----------
24image : ndarray
25Input image.
26mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
27How to handle values outside the image borders.
28cval : float, optional
29Used in conjunction with mode 'constant', the value outside
30the image boundaries.
31
32Returns
33-------
34derivatives : list of ndarray
35Derivatives in each axis direction.
36
37"""
38
39derivatives = [
40ndi.sobel(image, axis=i, mode=mode, cval=cval) for i in range(image.ndim)
41]
42
43return derivatives
44
45
def structure_tensor(image, sigma=1, mode='constant', cval=0, order='rc'):
    """Compute structure tensor using sum of squared differences.

    The (2-dimensional) structure tensor A is defined as::

        A = [Arr Arc]
            [Arc Acc]

    which is approximated by the weighted sum of squared differences in a local
    window around each pixel in the image. This formula can be extended to a
    larger number of dimensions (see [1]_).

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float or array-like of float, optional
        Standard deviation used for the Gaussian kernel, which is used as a
        weighting function for the local summation of squared differences.
        If sigma is an iterable, its length must be equal to `image.ndim` and
        each element is used for the Gaussian kernel applied along its
        respective axis.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        NOTE: 'xy' is only an option for 2D images, higher dimensions must
        always use 'rc' order. This parameter allows for the use of reverse or
        forward order of the image axes in gradient computation. 'rc' indicates
        the use of the first axis initially (Arr, Arc, Acc), whilst 'xy'
        indicates the usage of the last axis initially (Axx, Axy, Ayy).

    Returns
    -------
    A_elems : list of ndarray
        Upper-diagonal elements of the structure tensor for each pixel in the
        input image.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Arr, Arc, Acc = structure_tensor(square, sigma=0.1, order='rc')
    >>> Acc
    array([[0., 0., 0., 0., 0.],
           [0., 1., 0., 1., 0.],
           [0., 4., 0., 4., 0.],
           [0., 1., 0., 1., 0.],
           [0., 0., 0., 0., 0.]])

    See also
    --------
    structure_tensor_eigenvalues

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Structure_tensor
    """
    if order == 'xy' and image.ndim > 2:
        raise ValueError('Only "rc" order is supported for dim > 2.')

    if order not in ['rc', 'xy']:
        raise ValueError(f'order {order} is invalid. Must be either "rc" or "xy"')

    if not np.isscalar(sigma):
        sigma = tuple(sigma)
        if len(sigma) != image.ndim:
            raise ValueError('sigma must have as many elements as image has axes')

    image = _prepare_grayscale_input_nD(image)

    grads = _compute_derivatives(image, mode=mode, cval=cval)
    if order == 'xy':
        grads = grads[::-1]

    # Each tensor element is the Gaussian-weighted product of two gradients.
    return [
        gaussian(g0 * g1, sigma=sigma, mode=mode, cval=cval)
        for g0, g1 in combinations_with_replacement(grads, 2)
    ]
132
133
def _hessian_matrix_with_gaussian(image, sigma=1, mode='reflect', cval=0, order='rc'):
    """Compute the Hessian via convolutions with Gaussian derivatives.

    In 2D, the Hessian matrix is defined as:
        H = [Hrr Hrc]
            [Hrc Hcc]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective r- and c-directions.

    The implementation here also supports n-dimensional data.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float or sequence of float, optional
        Standard deviation used for the Gaussian kernel, which sets the
        amount of smoothing in terms of pixel-distances. It is
        advised to not choose a sigma much less than 1.0, otherwise
        aliasing artifacts may occur.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        This parameter allows for the use of reverse or forward order of
        the image axes in gradient computation. 'rc' indicates the use of
        the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the
        usage of the last axis initially (Hxx, Hxy, Hyy)

    Returns
    -------
    H_elems : list of ndarray
        Upper-diagonal elements of the hessian matrix for each pixel in the
        input image. In 2D, this will be a three element list containing [Hrr,
        Hrc, Hcc]. In nD, the list will contain ``(n**2 + n) / 2`` arrays.

    """
    image = img_as_float(image)
    image = image.astype(_supported_float_type(image.dtype), copy=False)
    ndim = image.ndim
    if ndim > 2 and order == "xy":
        raise ValueError("order='xy' is only supported for 2D images.")
    if order not in ["rc", "xy"]:
        raise ValueError(f"unrecognized order: {order}")

    if np.isscalar(sigma):
        sigma = (sigma,) * ndim

    # `scipy.ndimage.gaussian_filter` with the `order` argument performs the
    # convolutions: e.g. ``order=[1, 0]`` convolves with a first-order
    # Gaussian derivative along axis 0 and plain Gaussian smoothing along
    # axis 1.

    # For small sigma, the SciPy Gaussian filter suffers from aliasing and
    # edge artifacts: the filter approximates a sinc or sinc derivative,
    # which only decays as 1/n**2. Use a much larger truncate value there to
    # reduce any edge artifacts.
    truncate = 8 if all(s > 1 for s in sigma) else 100
    sq1_2 = 1 / math.sqrt(2)
    smooth = functools.partial(
        ndi.gaussian_filter,
        sigma=tuple(sq1_2 * s for s in sigma),
        mode=mode,
        cval=cval,
        truncate=truncate,
    )

    # Apply two successive first-order Gaussian derivative operations, as
    # detailed in:
    # https://dsp.stackexchange.com/questions/78280/are-scipy-second-order-gaussian-derivatives-correct  # noqa

    # One-hot derivative orders, e.g. in 2D: [1, 0] and [0, 1];
    # in 3D: [1, 0, 0], [0, 1, 0], [0, 0, 1]; etc.
    derivative_orders = [
        [1 if axis == d else 0 for axis in range(ndim)] for d in range(ndim)
    ]

    # 1.) first order along one axis while smoothing (order=0) along the others
    gradients = [smooth(image, order=o) for o in derivative_orders]

    # 2.) apply the derivative along the second axis of each pair as well
    axes = reversed(range(ndim)) if order == 'xy' else range(ndim)
    return [
        smooth(gradients[ax0], order=derivative_orders[ax1])
        for ax0, ax1 in combinations_with_replacement(axes, 2)
    ]
223
224
def hessian_matrix(
    image, sigma=1, mode='constant', cval=0, order='rc', use_gaussian_derivatives=None
):
    r"""Compute the Hessian matrix.

    In 2D, the Hessian matrix is defined as::

        H = [Hrr Hrc]
            [Hrc Hcc]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective r- and c-directions.

    The implementation here also supports n-dimensional data.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        For 2D images, this parameter allows for the use of reverse or forward
        order of the image axes in gradient computation. 'rc' indicates the use
        of the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the
        usage of the last axis initially (Hxx, Hxy, Hyy). Images with higher
        dimension must always use 'rc' order.
    use_gaussian_derivatives : boolean, optional
        Indicates whether the Hessian is computed by convolving with Gaussian
        derivatives, or by a simple finite-difference operation.

    Returns
    -------
    H_elems : list of ndarray
        Upper-diagonal elements of the hessian matrix for each pixel in the
        input image. In 2D, this will be a three element list containing [Hrr,
        Hrc, Hcc]. In nD, the list will contain ``(n**2 + n) / 2`` arrays.


    Notes
    -----
    The distributive property of derivatives and convolutions allows us to
    restate the derivative of an image, I, smoothed with a Gaussian kernel, G,
    as the convolution of the image with the derivative of G.

    .. math::

        \frac{\partial }{\partial x_i}(I * G) =
        I * \left( \frac{\partial }{\partial x_i} G \right)

    When ``use_gaussian_derivatives`` is ``True``, this property is used to
    compute the second order derivatives that make up the Hessian matrix.

    When ``use_gaussian_derivatives`` is ``False``, simple finite differences
    on a Gaussian-smoothed image are used instead.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order='rc',
    ...                                use_gaussian_derivatives=False)
    >>> Hrc
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])

    """
    image = img_as_float(image)
    image = image.astype(_supported_float_type(image.dtype), copy=False)

    if order == "xy" and image.ndim > 2:
        raise ValueError("order='xy' is only supported for 2D images.")
    if order not in ["rc", "xy"]:
        raise ValueError(f"unrecognized order: {order}")

    if use_gaussian_derivatives is None:
        # Keep the historical default while nudging callers to be explicit.
        use_gaussian_derivatives = False
        warn(
            "use_gaussian_derivatives currently defaults to False, but will "
            "change to True in a future version. Please specify this "
            "argument explicitly to maintain the current behavior",
            category=FutureWarning,
            stacklevel=2,
        )

    if use_gaussian_derivatives:
        return _hessian_matrix_with_gaussian(
            image, sigma=sigma, mode=mode, cval=cval, order=order
        )

    # Finite-difference path: smooth once, then differentiate twice.
    smoothed = gaussian(image, sigma=sigma, mode=mode, cval=cval)
    gradients = np.gradient(smoothed)

    axes = reversed(range(image.ndim)) if order == 'xy' else range(image.ndim)
    return [
        np.gradient(gradients[ax0], axis=ax1)
        for ax0, ax1 in combinations_with_replacement(axes, 2)
    ]
339
340
def hessian_matrix_det(image, sigma=1, approximate=True):
    """Compute the approximate Hessian Determinant over an image.

    The 2D approximate method uses box filters over integral images to
    compute the approximate Hessian Determinant.

    Parameters
    ----------
    image : ndarray
        The image over which to compute the Hessian Determinant.
    sigma : float, optional
        Standard deviation of the Gaussian kernel used for the Hessian
        matrix.
    approximate : bool, optional
        If ``True`` and the image is 2D, use a much faster approximate
        computation. This argument has no effect on 3D and higher images.

    Returns
    -------
    out : array
        The array of the Determinant of Hessians.

    References
    ----------
    .. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
           "SURF: Speeded Up Robust Features"
           ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf

    Notes
    -----
    For 2D images when ``approximate=True``, the running time of this method
    only depends on size of the image. It is independent of `sigma` as one
    would expect. The downside is that the result for `sigma` less than `3`
    is not accurate, i.e., not similar to the result obtained if someone
    computed the Hessian and took its determinant.
    """
    image = img_as_float(image)
    image = image.astype(_supported_float_type(image.dtype), copy=False)

    if approximate and image.ndim == 2:
        # Fast SURF-style path: box filters over the integral image.
        return np.array(_hessian_matrix_det(integral_image(image), sigma))

    # Slower brute-force implementation for nD images.
    hessian = _symmetric_image(
        hessian_matrix(image, sigma, use_gaussian_derivatives=False)
    )
    return np.linalg.det(hessian)
388
389
390def _symmetric_compute_eigenvalues(S_elems):
391"""Compute eigenvalues from the upper-diagonal entries of a symmetric
392matrix.
393
394Parameters
395----------
396S_elems : list of ndarray
397The upper-diagonal elements of the matrix, as returned by
398`hessian_matrix` or `structure_tensor`.
399
400Returns
401-------
402eigs : ndarray
403The eigenvalues of the matrix, in decreasing order. The eigenvalues are
404the leading dimension. That is, ``eigs[i, j, k]`` contains the
405ith-largest eigenvalue at position (j, k).
406"""
407
408if len(S_elems) == 3: # Fast explicit formulas for 2D.
409M00, M01, M11 = S_elems
410eigs = np.empty((2, *M00.shape), M00.dtype)
411eigs[:] = (M00 + M11) / 2
412hsqrtdet = np.sqrt(M01**2 + ((M00 - M11) / 2) ** 2)
413eigs[0] += hsqrtdet
414eigs[1] -= hsqrtdet
415return eigs
416else:
417matrices = _symmetric_image(S_elems)
418# eigvalsh returns eigenvalues in increasing order. We want decreasing
419eigs = np.linalg.eigvalsh(matrices)[..., ::-1]
420leading_axes = tuple(range(eigs.ndim - 1))
421return np.transpose(eigs, (eigs.ndim - 1,) + leading_axes)
422
423
424def _symmetric_image(S_elems):
425"""Convert the upper-diagonal elements of a matrix to the full
426symmetric matrix.
427
428Parameters
429----------
430S_elems : list of array
431The upper-diagonal elements of the matrix, as returned by
432`hessian_matrix` or `structure_tensor`.
433
434Returns
435-------
436image : array
437An array of shape ``(M, N[, ...], image.ndim, image.ndim)``,
438containing the matrix corresponding to each coordinate.
439"""
440image = S_elems[0]
441symmetric_image = np.zeros(
442image.shape + (image.ndim, image.ndim), dtype=S_elems[0].dtype
443)
444for idx, (row, col) in enumerate(
445combinations_with_replacement(range(image.ndim), 2)
446):
447symmetric_image[..., row, col] = S_elems[idx]
448symmetric_image[..., col, row] = S_elems[idx]
449return symmetric_image
450
451
def structure_tensor_eigenvalues(A_elems):
    """Compute eigenvalues of structure tensor.

    Parameters
    ----------
    A_elems : list of ndarray
        The upper-diagonal elements of the structure tensor, as returned
        by `structure_tensor`.

    Returns
    -------
    ndarray
        The eigenvalues of the structure tensor, in decreasing order. The
        eigenvalues are the leading dimension. That is, the coordinate
        [i, j, k] corresponds to the ith-largest eigenvalue at position (j, k).

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> from skimage.feature import structure_tensor_eigenvalues
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> A_elems = structure_tensor(square, sigma=0.1, order='rc')
    >>> structure_tensor_eigenvalues(A_elems)[0]
    array([[0., 0., 0., 0., 0.],
           [0., 2., 4., 2., 0.],
           [0., 4., 0., 4., 0.],
           [0., 2., 4., 2., 0.],
           [0., 0., 0., 0., 0.]])

    See also
    --------
    structure_tensor
    """
    # Delegates to the shared symmetric-matrix routine, which uses a fast
    # closed-form solution for the 2D (three-element) case.
    return _symmetric_compute_eigenvalues(A_elems)
487
488
def hessian_matrix_eigvals(H_elems):
    """Compute eigenvalues of Hessian matrix.

    Parameters
    ----------
    H_elems : list of ndarray
        The upper-diagonal elements of the Hessian matrix, as returned
        by `hessian_matrix`.

    Returns
    -------
    eigs : ndarray
        The eigenvalues of the Hessian matrix, in decreasing order. The
        eigenvalues are the leading dimension. That is, ``eigs[i, j, k]``
        contains the ith-largest eigenvalue at position (j, k).

    Examples
    --------
    >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> H_elems = hessian_matrix(square, sigma=0.1, order='rc',
    ...                          use_gaussian_derivatives=False)
    >>> hessian_matrix_eigvals(H_elems)[0]
    array([[ 0.,  0.,  2.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 2.,  0., -2.,  0.,  2.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  2.,  0.,  0.]])
    """
    # Delegates to the shared symmetric-matrix routine, which uses a fast
    # closed-form solution for the 2D (three-element) case.
    return _symmetric_compute_eigenvalues(H_elems)
520
521
def shape_index(image, sigma=1, mode='constant', cval=0):
    """Compute the shape index.

    The shape index, as defined by Koenderink & van Doorn [1]_, is a
    single valued measure of local curvature, assuming the image as a 3D plane
    with intensities representing heights.

    It is derived from the eigenvalues of the Hessian, and its
    value ranges from -1 to 1 (and is undefined (=NaN) in *flat* regions),
    with following ranges representing following shapes:

    .. table:: Ranges of the shape index and corresponding shapes.

        ===================  =============
        Interval (s in ...)  Shape
        ===================  =============
        [  -1, -7/8)         Spherical cup
        [-7/8, -5/8)         Through
        [-5/8, -3/8)         Rut
        [-3/8, -1/8)         Saddle rut
        [-1/8, +1/8)         Saddle
        [+1/8, +3/8)         Saddle ridge
        [+3/8, +5/8)         Ridge
        [+5/8, +7/8)         Dome
        [+7/8,   +1]         Spherical cap
        ===================  =============

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used for
        smoothing the input data before Hessian eigen value calculation.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    s : ndarray
        Shape index

    References
    ----------
    .. [1] Koenderink, J. J. & van Doorn, A. J.,
           "Surface shape and curvature scales",
           Image and Vision Computing, 1992, 10, 557-564.
           :DOI:`10.1016/0262-8856(92)90076-F`

    Examples
    --------
    >>> from skimage.feature import shape_index
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> s = shape_index(square, sigma=0.1)
    >>> s
    array([[ nan,  nan, -0.5,  nan,  nan],
           [ nan, -0. ,  nan, -0. ,  nan],
           [-0.5,  nan, -1. ,  nan, -0.5],
           [ nan, -0. ,  nan, -0. ,  nan],
           [ nan,  nan, -0.5,  nan,  nan]])
    """
    H = hessian_matrix(
        image,
        sigma=sigma,
        mode=mode,
        cval=cval,
        order='rc',
        use_gaussian_derivatives=False,
    )
    l1, l2 = hessian_matrix_eigvals(H)

    # Flat regions yield 0/0 here; suppress the warnings so those pixels
    # quietly become NaN (as in the docstring example).
    with np.errstate(divide='ignore', invalid='ignore'):
        index = (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1))
    return index
601
602
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
    """Compute Kitchen and Rosenfeld corner measure response image.

    The corner measure is calculated as follows::

        (imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
            / (imx**2 + imy**2)

    Where imx and imy are the first and imxx, imxy, imyy the second
    derivatives.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    response : ndarray
        Kitchen and Rosenfeld response image.

    References
    ----------
    .. [1] Kitchen, L., & Rosenfeld, A. (1982). Gray-level corner detection.
           Pattern recognition letters, 1(2), 95-102.
           :DOI:`10.1016/0167-8655(82)90020-4`
    """
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)

    # First derivatives, then second derivatives of each first derivative.
    imy, imx = _compute_derivatives(image, mode=mode, cval=cval)
    imxy, imxx = _compute_derivatives(imx, mode=mode, cval=cval)
    imyy, _imyx = _compute_derivatives(imy, mode=mode, cval=cval)

    numerator = imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy
    denominator = imx**2 + imy**2

    # Where the gradient vanishes the measure is undefined; leave it at 0.
    response = np.zeros_like(image, dtype=float_dtype)
    nonzero = denominator != 0
    response[nonzero] = numerator[nonzero] / denominator[nonzero]
    return response
652
653
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
    """Compute Harris corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as::

        det(A) - k * trace(A)**2

    or::

        2 * det(A) / (trace(A) + eps)

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    method : {'k', 'eps'}, optional
        Method to compute the response image from the auto-correlation matrix.
    k : float, optional
        Sensitivity factor to separate corners from edges, typically in range
        `[0, 0.2]`. Small values of k result in detection of sharp corners.
    eps : float, optional
        Normalisation factor (Noble's corner measure).
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    response : ndarray
        Harris response image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corner_peaks(corner_harris(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])

    """
    Arr, Arc, Acc = structure_tensor(image, sigma, order='rc')

    detA = Arr * Acc - Arc**2
    traceA = Arr + Acc

    if method == 'k':
        # Classic Harris measure with sensitivity factor k.
        return detA - k * traceA**2
    # Noble's variant: normalise by the trace instead of subtracting it.
    return 2 * detA / (traceA + eps)
732
733
def corner_shi_tomasi(image, sigma=1):
    """Compute Shi-Tomasi (Kanade-Tomasi) corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as the smaller eigenvalue of A::

        ((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    response : ndarray
        Shi-Tomasi response image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_shi_tomasi, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corner_peaks(corner_shi_tomasi(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])

    """
    Arr, Arc, Acc = structure_tensor(image, sigma, order='rc')

    # Smaller eigenvalue of the 2x2 structure tensor, in closed form.
    min_eigenvalue = ((Arr + Acc) - np.sqrt((Arr - Acc) ** 2 + 4 * Arc**2)) / 2
    return min_eigenvalue
794
795
def corner_foerstner(image, sigma=1):
    """Compute Foerstner corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as::

        w = det(A) / trace(A)           (size of error ellipse)
        q = 4 * det(A) / trace(A)**2    (roundness of error ellipse)

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    w : ndarray
        Error ellipse sizes.
    q : ndarray
        Roundness of error ellipse.

    References
    ----------
    .. [1] Förstner, W., & Gülch, E. (1987, June). A fast operator for
           detection and precise location of distinct points, corners and
           centres of circular features. In Proc. ISPRS intercommission
           conference on fast processing of photogrammetric data (pp. 281-305).
           https://cseweb.ucsd.edu/classes/sp02/cse252/foerstner/foerstner.pdf
    .. [2] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_foerstner, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> w, q = corner_foerstner(square)
    >>> accuracy_thresh = 0.5
    >>> roundness_thresh = 0.3
    >>> foerstner = (q > roundness_thresh) * (w > accuracy_thresh) * w
    >>> corner_peaks(foerstner, min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])

    """
    Arr, Arc, Acc = structure_tensor(image, sigma, order='rc')

    detA = Arr * Acc - Arc**2
    traceA = Arr + Acc

    w = np.zeros_like(image, dtype=detA.dtype)
    q = np.zeros_like(w)

    # Both measures are undefined where the trace vanishes; leave 0 there.
    ok = traceA != 0
    det_ok = detA[ok]
    trace_ok = traceA[ok]
    w[ok] = det_ok / trace_ok
    q[ok] = 4 * det_ok / trace_ok**2

    return w, q
878
879
def corner_fast(image, n=12, threshold=0.15):
    """Extract FAST corners for a given image.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    n : int, optional
        Minimum number of consecutive pixels out of 16 pixels on the circle
        that should all be either brighter or darker w.r.t testpixel.
        A point c on the circle is darker w.r.t test pixel p if
        `Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`. Also
        stands for the n in `FAST-n` corner detector.
    threshold : float, optional
        Threshold used in deciding whether the pixels on the circle are
        brighter, darker or similar w.r.t. the test pixel. Decrease the
        threshold when more corners are desired and vice-versa.

    Returns
    -------
    response : ndarray
        FAST corner response image.

    References
    ----------
    .. [1] Rosten, E., & Drummond, T. (2006, May). Machine learning for
           high-speed corner detection. In European conference on computer
           vision (pp. 430-443). Springer, Berlin, Heidelberg.
           :DOI:`10.1007/11744023_34`
           http://www.edwardrosten.com/work/rosten_2006_machine.pdf
    .. [2] Wikipedia, "Features from accelerated segment test",
           https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test

    Examples
    --------
    >>> from skimage.feature import corner_fast, corner_peaks
    >>> square = np.zeros((12, 12))
    >>> square[3:9, 3:9] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corner_peaks(corner_fast(square, 9), min_distance=1)
    array([[3, 3],
           [3, 8],
           [8, 3],
           [8, 8]])

    """
    # The Cython kernel requires a C-contiguous float image.
    image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))
    return _corner_fast(image, n, threshold)
943
944
def corner_subpix(image, corners, window_size=11, alpha=0.99):
    """Determine subpixel position of corners.

    A statistical test decides whether the corner is defined as the
    intersection of two edges or a single peak. Depending on the classification
    result, the subpixel corner location is determined based on the local
    covariance of the grey-values. If the significance level for either
    statistical test is not sufficient, the corner cannot be classified, and
    the output subpixel position is set to NaN.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    corners : (K, 2) ndarray
        Corner coordinates `(row, col)`.
    window_size : int, optional
        Search window size for subpixel estimation.
    alpha : float, optional
        Significance level for corner classification.

    Returns
    -------
    positions : (K, 2) ndarray
        Subpixel corner positions. NaN for "not classified" corners.

    References
    ----------
    .. [1] Förstner, W., & Gülch, E. (1987, June). A fast operator for
           detection and precise location of distinct points, corners and
           centres of circular features. In Proc. ISPRS intercommission
           conference on fast processing of photogrammetric data (pp. 281-305).
           https://cseweb.ucsd.edu/classes/sp02/cse252/foerstner/foerstner.pdf
    .. [2] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks, corner_subpix
    >>> img = np.zeros((10, 10))
    >>> img[:5, :5] = 1
    >>> img[5:, 5:] = 1
    >>> img.astype(int)
    array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
    >>> coords = corner_peaks(corner_harris(img), min_distance=2)
    >>> coords_subpix = corner_subpix(img, coords, window_size=7)
    >>> coords_subpix
    array([[4.5, 4.5]])

    """

    # window extent in one direction
    wext = (window_size - 1) // 2

    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    # pad so windows around corners near the image border stay in bounds
    image = np.pad(image, pad_width=wext, mode='constant', constant_values=0)

    # add pad width, make sure to not modify the input values in-place
    corners = safe_as_int(corners + wext)

    # normal equation arrays (2x2 systems, reused across corners)
    N_dot = np.zeros((2, 2), dtype=float_dtype)
    N_edge = np.zeros((2, 2), dtype=float_dtype)
    b_dot = np.zeros((2,), dtype=float_dtype)
    b_edge = np.zeros((2,), dtype=float_dtype)

    # critical statistical test values
    # (F-distribution with `redundancy` degrees of freedom on both sides)
    redundancy = window_size**2 - 2
    t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
    t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)

    # coordinates of pixels within window, relative to the window center
    y, x = np.mgrid[-wext : wext + 1, -wext : wext + 1]

    corners_subpix = np.zeros_like(corners, dtype=float_dtype)

    for i, (y0, x0) in enumerate(corners):
        # crop window around corner + border for sobel operator
        miny = y0 - wext - 1
        maxy = y0 + wext + 2
        minx = x0 - wext - 1
        maxx = x0 + wext + 2
        window = image[miny:maxy, minx:maxx]

        winy, winx = _compute_derivatives(window, mode='constant', cval=0)

        # compute gradient squares and remove border
        winx_winx = (winx * winx)[1:-1, 1:-1]
        winx_winy = (winx * winy)[1:-1, 1:-1]
        winy_winy = (winy * winy)[1:-1, 1:-1]

        # sum of squared differences (mean instead of gaussian filter)
        Axx = np.sum(winx_winx)
        Axy = np.sum(winx_winy)
        Ayy = np.sum(winy_winy)

        # sum of squared differences weighted with coordinates
        # (mean instead of gaussian filter)
        bxx_x = np.sum(winx_winx * x)
        bxx_y = np.sum(winx_winx * y)
        bxy_x = np.sum(winx_winy * x)
        bxy_y = np.sum(winx_winy * y)
        byy_x = np.sum(winy_winy * x)
        byy_y = np.sum(winy_winy * y)

        # normal equations for subpixel position
        # "dot" model: corner as a single intensity peak
        N_dot[0, 0] = Axx
        N_dot[0, 1] = N_dot[1, 0] = -Axy
        N_dot[1, 1] = Ayy

        # "edge" model: corner as intersection of two edges
        N_edge[0, 0] = Ayy
        N_edge[0, 1] = N_edge[1, 0] = Axy
        N_edge[1, 1] = Axx

        b_dot[:] = bxx_y - bxy_x, byy_x - bxy_y
        b_edge[:] = byy_y + bxy_x, bxx_x + bxy_y

        # estimated positions
        try:
            est_dot = np.linalg.solve(N_dot, b_dot)
            est_edge = np.linalg.solve(N_edge, b_edge)
        except np.linalg.LinAlgError:
            # if image is constant the system is singular
            corners_subpix[i, :] = np.nan, np.nan
            continue

        # residuals
        ry_dot = y - est_dot[0]
        rx_dot = x - est_dot[1]
        ry_edge = y - est_edge[0]
        rx_edge = x - est_edge[1]
        # squared residuals
        rxx_dot = rx_dot * rx_dot
        rxy_dot = rx_dot * ry_dot
        ryy_dot = ry_dot * ry_dot
        rxx_edge = rx_edge * rx_edge
        rxy_edge = rx_edge * ry_edge
        ryy_edge = ry_edge * ry_edge

        # determine corner class (dot or edge)
        # variance for different models
        var_dot = np.sum(
            winx_winx * ryy_dot - 2 * winx_winy * rxy_dot + winy_winy * rxx_dot
        )
        var_edge = np.sum(
            winy_winy * ryy_edge + 2 * winx_winy * rxy_edge + winx_winx * rxx_edge
        )

        # test value (F-distributed)
        if var_dot < np.spacing(1) and var_edge < np.spacing(1):
            # both models fit (essentially) perfectly -> undecidable
            t = np.nan
        elif var_dot == 0:
            t = np.inf
        else:
            t = var_edge / var_dot

        # 1 for edge, -1 for dot, 0 for "not classified"
        corner_class = int(t < t_crit_edge) - int(t > t_crit_dot)

        if corner_class == -1:
            corners_subpix[i, :] = y0 + est_dot[0], x0 + est_dot[1]
        elif corner_class == 0:
            corners_subpix[i, :] = np.nan, np.nan
        elif corner_class == 1:
            corners_subpix[i, :] = y0 + est_edge[0], x0 + est_edge[1]

    # subtract pad width to map back to input-image coordinates
    corners_subpix -= wext

    return corners_subpix
1124
1125
def corner_peaks(
    image,
    min_distance=1,
    threshold_abs=None,
    threshold_rel=None,
    exclude_border=True,
    indices=True,
    num_peaks=np.inf,
    footprint=None,
    labels=None,
    *,
    num_peaks_per_label=np.inf,
    p_norm=np.inf,
):
    """Find peaks in corner measure response image.

    This differs from `skimage.feature.peak_local_max` in that it suppresses
    multiple connected peaks with the same accumulator value.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    min_distance : int, optional
        The minimal allowed distance separating peaks.
    * : *
        See :py:meth:`skimage.feature.peak_local_max`.
    p_norm : float
        Which Minkowski p-norm to use. Should be in the range [1, inf].
        A finite large p may cause a ValueError if overflow can occur.
        ``inf`` corresponds to the Chebyshev distance and 2 to the
        Euclidean distance.

    Returns
    -------
    output : ndarray or ndarray of bools

        * If `indices = True` : (row, column, ...) coordinates of peaks.
        * If `indices = False` : Boolean array shaped like `image`, with peaks
          represented by True values.

    See also
    --------
    skimage.feature.peak_local_max

    Notes
    -----
    .. versionchanged:: 0.18
        The default value of `threshold_rel` has changed to None, which
        corresponds to letting `skimage.feature.peak_local_max` decide on the
        default. This is equivalent to `threshold_rel=0`.

    The `num_peaks` limit is applied before suppression of connected peaks.
    To limit the number of peaks after suppression, set `num_peaks=np.inf` and
    post-process the output of this function.

    Examples
    --------
    >>> from skimage.feature import peak_local_max
    >>> response = np.zeros((5, 5))
    >>> response[2:4, 2:4] = 1
    >>> response
    array([[0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0.],
           [0., 0., 1., 1., 0.],
           [0., 0., 1., 1., 0.],
           [0., 0., 0., 0., 0.]])
    >>> peak_local_max(response)
    array([[2, 2],
           [2, 3],
           [3, 2],
           [3, 3]])
    >>> corner_peaks(response)
    array([[2, 2]])

    """
    # np.inf means "no limit"; slicing with None keeps all rows
    limit = None if np.isinf(num_peaks) else num_peaks

    # Collect all candidate peaks first; the count limit is applied only
    # after suppression of close-by peaks below.
    coords = peak_local_max(
        image,
        min_distance=min_distance,
        threshold_abs=threshold_abs,
        threshold_rel=threshold_rel,
        exclude_border=exclude_border,
        num_peaks=np.inf,
        footprint=footprint,
        labels=labels,
        num_peaks_per_label=num_peaks_per_label,
    )

    if len(coords):
        # KD-tree makes the radius queries below efficient
        tree = spatial.cKDTree(coords)

        # Greedy suppression: scan peaks in order; a peak that is still
        # marked keeps its spot and knocks out every other peak within
        # `min_distance` of it.
        keep = np.ones(len(coords), dtype=bool)
        for idx in range(len(coords)):
            if not keep[idx]:
                continue
            neighbors = tree.query_ball_point(coords[idx], r=min_distance, p=p_norm)
            neighbors.remove(idx)  # the query includes the point itself
            keep[neighbors] = False

        coords = coords[keep][:limit]

    if indices:
        return coords

    # Boolean mask output: True at every surviving peak position
    mask = np.zeros_like(image, dtype=bool)
    mask[tuple(coords.T)] = True

    return mask
1239
1240
def corner_moravec(image, window_size=1):
    """Compute Moravec corner measure response image.

    This is one of the simplest corner detectors and is comparatively fast but
    has several limitations (e.g. not rotation invariant).

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    window_size : int, optional
        Window size.

    Returns
    -------
    response : ndarray
        Moravec response image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_moravec
    >>> square = np.zeros([7, 7])
    >>> square[3, 3] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> corner_moravec(square).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 2, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    """
    # Normalize to float, coerce to a supported float dtype, and make the
    # array C-contiguous as required by the Cython implementation.
    img = img_as_float(image)
    img = img.astype(_supported_float_type(img.dtype), copy=False)
    img = np.ascontiguousarray(img)
    return _corner_moravec(img, window_size)
1289
1290
def corner_orientations(image, corners, mask):
    """Compute the orientation of corners.

    The orientation of corners is computed using the first order central moment
    i.e. the center of mass approach. The corner orientation is the angle of
    the vector from the corner coordinate to the intensity centroid in the
    local neighborhood around the corner calculated using first order central
    moment.

    Parameters
    ----------
    image : (M, N) array
        Input grayscale image.
    corners : (K, 2) array
        Corner coordinates as ``(row, col)``.
    mask : 2D array
        Mask defining the local neighborhood of the corner used for the
        calculation of the central moment.

    Returns
    -------
    orientations : (K, 1) array
        Orientations of corners in the range [-pi, pi].

    References
    ----------
    .. [1] Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary Bradski
          "ORB : An efficient alternative to SIFT and SURF"
          http://www.vision.cs.chubu.ac.jp/CV-R/pdf/Rublee_iccv2011.pdf
    .. [2] Paul L. Rosin, "Measuring Corner Properties"
          http://users.cs.cf.ac.uk/Paul.Rosin/corner2.pdf

    Examples
    --------
    >>> from skimage.morphology import octagon
    >>> from skimage.feature import (corner_fast, corner_peaks,
    ...                              corner_orientations)
    >>> square = np.zeros((12, 12))
    >>> square[3:9, 3:9] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corners = corner_peaks(corner_fast(square, 9), min_distance=1)
    >>> corners
    array([[3, 3],
           [3, 8],
           [8, 3],
           [8, 8]])
    >>> orientations = corner_orientations(square, corners, octagon(3, 2))
    >>> np.rad2deg(orientations)
    array([  45.,  135.,  -45., -135.])

    """
    # Validate/convert to a 2D grayscale float image, then delegate to the
    # Cython implementation of the centroid-based orientation estimate.
    gray = _prepare_grayscale_input_2D(image)
    return _corner_orientations(gray, corners, mask)
1356