scikit-image

import functools
import math
from itertools import combinations_with_replacement

import numpy as np
from scipy import ndimage as ndi
from scipy import spatial, stats

from .._shared.filters import gaussian
from .._shared.utils import _supported_float_type, safe_as_int, warn
from ..transform import integral_image
from ..util import img_as_float
from ._hessian_det_appx import _hessian_matrix_det
from .corner_cy import _corner_fast, _corner_moravec, _corner_orientations
from .peak import peak_local_max
from .util import _prepare_grayscale_input_2D, _prepare_grayscale_input_nD


def _compute_derivatives(image, mode='constant', cval=0):
    """Compute derivatives in axis directions using the Sobel operator.

    Parameters
    ----------
    image : ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    derivatives : list of ndarray
        Derivatives in each axis direction.

    """

    derivatives = [
        ndi.sobel(image, axis=i, mode=mode, cval=cval) for i in range(image.ndim)
    ]

    return derivatives


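# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# `_compute_derivatives` returns one Sobel response per axis, in axis order:
# for a 2D image the first element differentiates along rows (r), the second
# along columns (c). The hypothetical helper below is never called; it only
# documents that ordering with a small symmetry check.
def _demo_compute_derivatives_axis_order():
    image = np.zeros((5, 5))
    image[2, 2] = 1.0
    d_r, d_c = _compute_derivatives(image)
    # For a centered impulse, the row-derivative is antisymmetric under a
    # row flip and the column-derivative under a column flip.
    assert np.allclose(d_r, -d_r[::-1, :])
    assert np.allclose(d_c, -d_c[:, ::-1])

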
def structure_tensor(image, sigma=1, mode='constant', cval=0, order='rc'):
    """Compute structure tensor using sum of squared differences.

    The (2-dimensional) structure tensor A is defined as::

        A = [Arr Arc]
            [Arc Acc]

    which is approximated by the weighted sum of squared differences in a local
    window around each pixel in the image. This formula can be extended to a
    larger number of dimensions (see [1]_).

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float or array-like of float, optional
        Standard deviation used for the Gaussian kernel, which is used as a
        weighting function for the local summation of squared differences.
        If sigma is an iterable, its length must be equal to `image.ndim` and
        each element is used for the Gaussian kernel applied along its
        respective axis.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        NOTE: 'xy' is only an option for 2D images, higher dimensions must
        always use 'rc' order. This parameter allows for the use of reverse or
        forward order of the image axes in gradient computation. 'rc' indicates
        the use of the first axis initially (Arr, Arc, Acc), whilst 'xy'
        indicates the usage of the last axis initially (Axx, Axy, Ayy).

    Returns
    -------
    A_elems : list of ndarray
        Upper-diagonal elements of the structure tensor for each pixel in the
        input image.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Arr, Arc, Acc = structure_tensor(square, sigma=0.1, order='rc')
    >>> Acc
    array([[0., 0., 0., 0., 0.],
           [0., 1., 0., 1., 0.],
           [0., 4., 0., 4., 0.],
           [0., 1., 0., 1., 0.],
           [0., 0., 0., 0., 0.]])

    See also
    --------
    structure_tensor_eigenvalues

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Structure_tensor
    """
    if order == 'xy' and image.ndim > 2:
        raise ValueError('Only "rc" order is supported for dim > 2.')

    if order not in ['rc', 'xy']:
        raise ValueError(f'order {order} is invalid. Must be either "rc" or "xy"')

    if not np.isscalar(sigma):
        sigma = tuple(sigma)
        if len(sigma) != image.ndim:
            raise ValueError('sigma must have as many elements as image has axes')

    image = _prepare_grayscale_input_nD(image)

    derivatives = _compute_derivatives(image, mode=mode, cval=cval)

    if order == 'xy':
        derivatives = reversed(derivatives)

    # structure tensor
    A_elems = [
        gaussian(der0 * der1, sigma=sigma, mode=mode, cval=cval)
        for der0, der1 in combinations_with_replacement(derivatives, 2)
    ]

    return A_elems


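# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# `structure_tensor` also accepts n-dimensional input and a per-axis sigma.
# The hypothetical, never-called helper below shows that a 3D volume with an
# anisotropic smoothing scale yields (3**2 + 3) / 2 = 6 upper-diagonal tensor
# elements, and that the eigenvalue helper stacks them along a leading axis.
def _demo_structure_tensor_3d():
    volume = np.zeros((8, 8, 8))
    volume[4, 4, 4] = 1.0
    # Smooth less along the first axis than along the other two.
    A_elems = structure_tensor(volume, sigma=(0.5, 1.0, 1.0), order='rc')
    assert len(A_elems) == 6
    eigs = structure_tensor_eigenvalues(A_elems)
    assert eigs.shape == (3,) + volume.shape

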
def _hessian_matrix_with_gaussian(image, sigma=1, mode='reflect', cval=0, order='rc'):
    """Compute the Hessian via convolutions with Gaussian derivatives.

    In 2D, the Hessian matrix is defined as::

        H = [Hrr Hrc]
            [Hrc Hcc]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective r- and c-directions.

    The implementation here also supports n-dimensional data.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float or sequence of float, optional
        Standard deviation used for the Gaussian kernel, which sets the
        amount of smoothing in terms of pixel-distances. It is
        advised to not choose a sigma much less than 1.0, otherwise
        aliasing artifacts may occur.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        This parameter allows for the use of reverse or forward order of
        the image axes in gradient computation. 'rc' indicates the use of
        the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the
        usage of the last axis initially (Hxx, Hxy, Hyy).

    Returns
    -------
    H_elems : list of ndarray
        Upper-diagonal elements of the Hessian matrix for each pixel in the
        input image. In 2D, this will be a three element list containing [Hrr,
        Hrc, Hcc]. In nD, the list will contain ``(n**2 + n) / 2`` arrays.

    """
    image = img_as_float(image)
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    if image.ndim > 2 and order == "xy":
        raise ValueError("order='xy' is only supported for 2D images.")
    if order not in ["rc", "xy"]:
        raise ValueError(f"unrecognized order: {order}")

    if np.isscalar(sigma):
        sigma = (sigma,) * image.ndim

    # This function uses `scipy.ndimage.gaussian_filter` with the order
    # argument to compute convolutions. For example, specifying
    # ``order=[1, 0]`` would apply convolution with a first-order derivative of
    # the Gaussian along the first axis and simple Gaussian smoothing along the
    # second.

    # For small sigma, the SciPy Gaussian filter suffers from aliasing and edge
    # artifacts, given that the filter will approximate a sinc or sinc
    # derivative which only goes to 0 very slowly (order 1/n**2). Thus, we use
    # a much larger truncate value to reduce any edge artifacts.
    truncate = 8 if all(s > 1 for s in sigma) else 100
    sq1_2 = 1 / math.sqrt(2)
    sigma_scaled = tuple(sq1_2 * s for s in sigma)
    common_kwargs = dict(sigma=sigma_scaled, mode=mode, cval=cval, truncate=truncate)
    gaussian_ = functools.partial(ndi.gaussian_filter, **common_kwargs)

    # Apply two successive first order Gaussian derivative operations, as
    # detailed in:
    # https://dsp.stackexchange.com/questions/78280/are-scipy-second-order-gaussian-derivatives-correct  # noqa

    # 1.) First order along one axis while smoothing (order=0) along the other
    ndim = image.ndim

    # orders in 2D = ([1, 0], [0, 1])
    #        in 3D = ([1, 0, 0], [0, 1, 0], [0, 0, 1])
    #        etc.
    orders = tuple([0] * d + [1] + [0] * (ndim - d - 1) for d in range(ndim))
    gradients = [gaussian_(image, order=orders[d]) for d in range(ndim)]

    # 2.) apply the derivative along another axis as well
    axes = range(ndim)
    if order == 'xy':
        axes = reversed(axes)
    H_elems = [
        gaussian_(gradients[ax0], order=orders[ax1])
        for ax0, ax1 in combinations_with_replacement(axes, 2)
    ]
    return H_elems


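# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# The public entry point to the Gaussian-derivative code path above is
# ``hessian_matrix(..., use_gaussian_derivatives=True)``. The hypothetical,
# never-called helper below checks that both routes agree on the number and
# shape of the upper-diagonal elements for a smooth Gaussian blob.
def _demo_hessian_gaussian_derivatives():
    rr, cc = np.mgrid[:32, :32]
    blob = np.exp(-((rr - 16) ** 2 + (cc - 16) ** 2) / (2 * 4.0**2))
    H_private = _hessian_matrix_with_gaussian(blob, sigma=2)
    H_public = hessian_matrix(blob, sigma=2, use_gaussian_derivatives=True)
    assert len(H_private) == len(H_public) == 3  # Hrr, Hrc, Hcc
    for h_a, h_b in zip(H_private, H_public):
        assert h_a.shape == h_b.shape == blob.shape

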
def hessian_matrix(
    image, sigma=1, mode='constant', cval=0, order='rc', use_gaussian_derivatives=None
):
    r"""Compute the Hessian matrix.

    In 2D, the Hessian matrix is defined as::

        H = [Hrr Hrc]
            [Hrc Hcc]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective r- and c-directions.

    The implementation here also supports n-dimensional data.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which sets the
        amount of smoothing applied before the second derivatives are
        computed.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        For 2D images, this parameter allows for the use of reverse or forward
        order of the image axes in gradient computation. 'rc' indicates the use
        of the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the
        usage of the last axis initially (Hxx, Hxy, Hyy). Images with higher
        dimension must always use 'rc' order.
    use_gaussian_derivatives : boolean, optional
        Indicates whether the Hessian is computed by convolving with Gaussian
        derivatives, or by a simple finite-difference operation.

    Returns
    -------
    H_elems : list of ndarray
        Upper-diagonal elements of the Hessian matrix for each pixel in the
        input image. In 2D, this will be a three element list containing [Hrr,
        Hrc, Hcc]. In nD, the list will contain ``(n**2 + n) / 2`` arrays.

    Notes
    -----
    The distributive property of derivatives and convolutions allows us to
    restate the derivative of an image, I, smoothed with a Gaussian kernel, G,
    as the convolution of the image with the derivative of G.

    .. math::

        \frac{\partial }{\partial x_i}(I * G) =
        I * \left( \frac{\partial }{\partial x_i} G \right)

    When ``use_gaussian_derivatives`` is ``True``, this property is used to
    compute the second order derivatives that make up the Hessian matrix.

    When ``use_gaussian_derivatives`` is ``False``, simple finite differences
    on a Gaussian-smoothed image are used instead.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order='rc',
    ...                                use_gaussian_derivatives=False)
    >>> Hrc
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])

    """

    image = img_as_float(image)
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    if image.ndim > 2 and order == "xy":
        raise ValueError("order='xy' is only supported for 2D images.")
    if order not in ["rc", "xy"]:
        raise ValueError(f"unrecognized order: {order}")

    if use_gaussian_derivatives is None:
        use_gaussian_derivatives = False
        warn(
            "use_gaussian_derivatives currently defaults to False, but will "
            "change to True in a future version. Please specify this "
            "argument explicitly to maintain the current behavior",
            category=FutureWarning,
            stacklevel=2,
        )

    if use_gaussian_derivatives:
        return _hessian_matrix_with_gaussian(
            image, sigma=sigma, mode=mode, cval=cval, order=order
        )

    gaussian_filtered = gaussian(image, sigma=sigma, mode=mode, cval=cval)

    gradients = np.gradient(gaussian_filtered)
    axes = range(image.ndim)

    if order == 'xy':
        axes = reversed(axes)

    H_elems = [
        np.gradient(gradients[ax0], axis=ax1)
        for ax0, ax1 in combinations_with_replacement(axes, 2)
    ]
    return H_elems


def hessian_matrix_det(image, sigma=1, approximate=True):
    """Compute the approximate Hessian Determinant over an image.

    The 2D approximate method uses box filters over integral images to
    compute the approximate Hessian Determinant.

    Parameters
    ----------
    image : ndarray
        The image over which to compute the Hessian Determinant.
    sigma : float, optional
        Standard deviation of the Gaussian kernel used for the Hessian
        matrix.
    approximate : bool, optional
        If ``True`` and the image is 2D, use a much faster approximate
        computation. This argument has no effect on 3D and higher images.

    Returns
    -------
    out : array
        The array of the Determinant of Hessians.

    References
    ----------
    .. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
           "SURF: Speeded Up Robust Features"
           ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf

    Notes
    -----
    For 2D images when ``approximate=True``, the running time of this method
    only depends on the size of the image; it is independent of `sigma`, as
    one would expect. The downside is that the result for `sigma` less than
    `3` is not accurate, i.e., not similar to the result obtained if someone
    computed the Hessian and took its determinant.
    """
    image = img_as_float(image)
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    if image.ndim == 2 and approximate:
        integral = integral_image(image)
        return np.array(_hessian_matrix_det(integral, sigma))
    else:  # slower brute-force implementation for nD images
        hessian_mat_array = _symmetric_image(
            hessian_matrix(image, sigma, use_gaussian_derivatives=False)
        )
        return np.linalg.det(hessian_mat_array)


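# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# The hypothetical, never-called helper below contrasts the two code paths of
# `hessian_matrix_det`: the SURF-style box-filter approximation and the
# brute-force determinant of the explicit Hessian. Both return one value per
# pixel; their values only become comparable for sigma of roughly 3 or more.
def _demo_hessian_matrix_det_paths():
    rr, cc = np.mgrid[:64, :64]
    blob = np.exp(-((rr - 32) ** 2 + (cc - 32) ** 2) / (2 * 6.0**2))
    det_fast = hessian_matrix_det(blob, sigma=3, approximate=True)
    det_exact = hessian_matrix_det(blob, sigma=3, approximate=False)
    assert det_fast.shape == det_exact.shape == blob.shape
    # The determinant of the Hessian is positive at the center of the blob.
    assert det_exact[32, 32] > 0

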
def _symmetric_compute_eigenvalues(S_elems):
    """Compute eigenvalues from the upper-diagonal entries of a symmetric
    matrix.

    Parameters
    ----------
    S_elems : list of ndarray
        The upper-diagonal elements of the matrix, as returned by
        `hessian_matrix` or `structure_tensor`.

    Returns
    -------
    eigs : ndarray
        The eigenvalues of the matrix, in decreasing order. The eigenvalues are
        the leading dimension. That is, ``eigs[i, j, k]`` contains the
        ith-largest eigenvalue at position (j, k).
    """

    if len(S_elems) == 3:  # Fast explicit formulas for 2D.
        M00, M01, M11 = S_elems
        eigs = np.empty((2, *M00.shape), M00.dtype)
        eigs[:] = (M00 + M11) / 2
        hsqrtdet = np.sqrt(M01**2 + ((M00 - M11) / 2) ** 2)
        eigs[0] += hsqrtdet
        eigs[1] -= hsqrtdet
        return eigs
    else:
        matrices = _symmetric_image(S_elems)
        # eigvalsh returns eigenvalues in increasing order. We want decreasing
        eigs = np.linalg.eigvalsh(matrices)[..., ::-1]
        leading_axes = tuple(range(eigs.ndim - 1))
        return np.transpose(eigs, (eigs.ndim - 1,) + leading_axes)


def _symmetric_image(S_elems):
    """Convert the upper-diagonal elements of a matrix to the full
    symmetric matrix.

    Parameters
    ----------
    S_elems : list of array
        The upper-diagonal elements of the matrix, as returned by
        `hessian_matrix` or `structure_tensor`.

    Returns
    -------
    image : array
        An array of shape ``(M, N[, ...], image.ndim, image.ndim)``,
        containing the matrix corresponding to each coordinate.
    """
    image = S_elems[0]
    symmetric_image = np.zeros(
        image.shape + (image.ndim, image.ndim), dtype=S_elems[0].dtype
    )
    for idx, (row, col) in enumerate(
        combinations_with_replacement(range(image.ndim), 2)
    ):
        symmetric_image[..., row, col] = S_elems[idx]
        symmetric_image[..., col, row] = S_elems[idx]
    return symmetric_image


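# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# The two private helpers above are consistent with each other: stacking the
# upper-diagonal elements into full matrices and calling
# ``np.linalg.eigvalsh`` gives the same eigenvalues (up to ordering) as the
# fast 2D formulas. The hypothetical, never-called helper below records that
# invariant.
def _demo_symmetric_helpers_roundtrip():
    rng = np.random.default_rng(0)
    noise = rng.standard_normal((16, 16))
    H_elems = hessian_matrix(noise, sigma=1, use_gaussian_derivatives=True)
    eigs_fast = _symmetric_compute_eigenvalues(H_elems)  # shape (2, 16, 16)
    full = _symmetric_image(H_elems)  # shape (16, 16, 2, 2)
    eigs_ref = np.linalg.eigvalsh(full)[..., ::-1]  # decreasing order
    assert np.allclose(np.moveaxis(eigs_ref, -1, 0), eigs_fast)

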
def structure_tensor_eigenvalues(A_elems):
    """Compute eigenvalues of structure tensor.

    Parameters
    ----------
    A_elems : list of ndarray
        The upper-diagonal elements of the structure tensor, as returned
        by `structure_tensor`.

    Returns
    -------
    ndarray
        The eigenvalues of the structure tensor, in decreasing order. The
        eigenvalues are the leading dimension. That is, the coordinate
        [i, j, k] corresponds to the ith-largest eigenvalue at position (j, k).

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> from skimage.feature import structure_tensor_eigenvalues
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> A_elems = structure_tensor(square, sigma=0.1, order='rc')
    >>> structure_tensor_eigenvalues(A_elems)[0]
    array([[0., 0., 0., 0., 0.],
           [0., 2., 4., 2., 0.],
           [0., 4., 0., 4., 0.],
           [0., 2., 4., 2., 0.],
           [0., 0., 0., 0., 0.]])

    See also
    --------
    structure_tensor
    """
    return _symmetric_compute_eigenvalues(A_elems)


def hessian_matrix_eigvals(H_elems):
    """Compute eigenvalues of Hessian matrix.

    Parameters
    ----------
    H_elems : list of ndarray
        The upper-diagonal elements of the Hessian matrix, as returned
        by `hessian_matrix`.

    Returns
    -------
    eigs : ndarray
        The eigenvalues of the Hessian matrix, in decreasing order. The
        eigenvalues are the leading dimension. That is, ``eigs[i, j, k]``
        contains the ith-largest eigenvalue at position (j, k).

    Examples
    --------
    >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> H_elems = hessian_matrix(square, sigma=0.1, order='rc',
    ...                          use_gaussian_derivatives=False)
    >>> hessian_matrix_eigvals(H_elems)[0]
    array([[ 0.,  0.,  2.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 2.,  0., -2.,  0.,  2.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  2.,  0.,  0.]])
    """
    return _symmetric_compute_eigenvalues(H_elems)


def shape_index(image, sigma=1, mode='constant', cval=0):
    """Compute the shape index.

    The shape index, as defined by Koenderink & van Doorn [1]_, is a
    single-valued measure of local curvature, obtained by treating the image
    as a 3D surface whose intensities represent heights.

    It is derived from the eigenvalues of the Hessian, and its
    value ranges from -1 to 1 (and is undefined (=NaN) in *flat* regions),
    with the following ranges representing the following shapes:

    .. table:: Ranges of the shape index and corresponding shapes.

      ===================  =============
      Interval (s in ...)  Shape
      ===================  =============
      [  -1, -7/8)         Spherical cup
      [-7/8, -5/8)         Trough
      [-5/8, -3/8)         Rut
      [-3/8, -1/8)         Saddle rut
      [-1/8, +1/8)         Saddle
      [+1/8, +3/8)         Saddle ridge
      [+3/8, +5/8)         Ridge
      [+5/8, +7/8)         Dome
      [+7/8,   +1]         Spherical cap
      ===================  =============

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used for
        smoothing the input data before Hessian eigenvalue calculation.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    s : ndarray
        Shape index

    References
    ----------
    .. [1] Koenderink, J. J. & van Doorn, A. J.,
           "Surface shape and curvature scales",
           Image and Vision Computing, 1992, 10, 557-564.
           :DOI:`10.1016/0262-8856(92)90076-F`

    Examples
    --------
    >>> from skimage.feature import shape_index
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> s = shape_index(square, sigma=0.1)
    >>> s
    array([[ nan,  nan, -0.5,  nan,  nan],
           [ nan, -0. ,  nan, -0. ,  nan],
           [-0.5,  nan, -1. ,  nan, -0.5],
           [ nan, -0. ,  nan, -0. ,  nan],
           [ nan,  nan, -0.5,  nan,  nan]])
    """

    H = hessian_matrix(
        image,
        sigma=sigma,
        mode=mode,
        cval=cval,
        order='rc',
        use_gaussian_derivatives=False,
    )
    l1, l2 = hessian_matrix_eigvals(H)

    # don't warn on divide by 0 as occurs in the docstring example
    with np.errstate(divide='ignore', invalid='ignore'):
        return (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1))


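# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# The docstring table above maps shape-index values to nine surface types.
# The hypothetical, never-called helper below shows one way to turn the
# continuous index into those categories with ``np.digitize``; category names
# and bin edges are taken directly from the table.
def _demo_shape_index_categories():
    names = [
        'spherical cup', 'trough', 'rut', 'saddle rut', 'saddle',
        'saddle ridge', 'ridge', 'dome', 'spherical cap',
    ]
    edges = np.array([-7, -5, -3, -1, 1, 3, 5, 7]) / 8
    rr, cc = np.mgrid[:32, :32]
    ridge_img = np.exp(-((cc - 16) ** 2) / 18.0)  # bright ridge along rows
    s = shape_index(ridge_img, sigma=1)
    labels = np.digitize(s, edges)  # NaN (flat regions) lands past the last bin
    assert names[labels[16, 16]] == 'ridge'  # s == 0.5 on the ridge crest

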
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
    """Compute Kitchen and Rosenfeld corner measure response image.

    The corner measure is calculated as follows::

        (imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
            / (imx**2 + imy**2)

    Where imx and imy are the first and imxx, imxy, imyy the second
    derivatives.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    response : ndarray
        Kitchen and Rosenfeld response image.

    References
    ----------
    .. [1] Kitchen, L., & Rosenfeld, A. (1982). Gray-level corner detection.
           Pattern recognition letters, 1(2), 95-102.
           :DOI:`10.1016/0167-8655(82)90020-4`
    """

    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)

    imy, imx = _compute_derivatives(image, mode=mode, cval=cval)
    imxy, imxx = _compute_derivatives(imx, mode=mode, cval=cval)
    imyy, imyx = _compute_derivatives(imy, mode=mode, cval=cval)

    numerator = imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy
    denominator = imx**2 + imy**2

    response = np.zeros_like(image, dtype=float_dtype)

    mask = denominator != 0
    response[mask] = numerator[mask] / denominator[mask]

    return response


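# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# `corner_kitchen_rosenfeld` has no doctest above, so the hypothetical,
# never-called helper below sketches the usual workflow: compute the response
# image, then pick local maxima with `corner_peaks`. The threshold here is an
# arbitrary illustration value, not a recommendation.
def _demo_corner_kitchen_rosenfeld_usage():
    square = np.zeros((10, 10))
    square[2:8, 2:8] = 1
    response = corner_kitchen_rosenfeld(square, mode='constant')
    peaks = corner_peaks(response, min_distance=1, threshold_rel=0.1)
    assert peaks.ndim == 2 and peaks.shape[1] == 2  # (row, col) coordinates

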
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
    """Compute Harris corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as::

        det(A) - k * trace(A)**2

    or::

        2 * det(A) / (trace(A) + eps)

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    method : {'k', 'eps'}, optional
        Method to compute the response image from the auto-correlation matrix.
    k : float, optional
        Sensitivity factor to separate corners from edges, typically in range
        `[0, 0.2]`. Small values of k result in detection of sharp corners.
    eps : float, optional
        Normalisation factor (Noble's corner measure).
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    response : ndarray
        Harris response image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corner_peaks(corner_harris(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])

    """

    Arr, Arc, Acc = structure_tensor(image, sigma, order='rc')

    # determinant
    detA = Arr * Acc - Arc**2
    # trace
    traceA = Arr + Acc

    if method == 'k':
        response = detA - k * traceA**2
    else:
        response = 2 * detA / (traceA + eps)

    return response


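# --- Editor's illustrative sketch (not part of scikit-image) ----------------
# Besides the default ``method='k'`` shown in the doctest above,
# `corner_harris` also implements Noble's normalised measure via
# ``method='eps'``. The hypothetical, never-called helper below re-derives
# that measure from `structure_tensor` to illustrate the formula in the
# docstring.
def _demo_corner_harris_eps_method():
    square = np.zeros((10, 10))
    square[2:8, 2:8] = 1
    Arr, Arc, Acc = structure_tensor(square, sigma=1, order='rc')
    det_a = Arr * Acc - Arc**2
    trace_a = Arr + Acc
    expected = 2 * det_a / (trace_a + 1e-6)
    assert np.allclose(corner_harris(square, method='eps', eps=1e-6), expected)

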
def corner_shi_tomasi(image, sigma=1):
    """Compute Shi-Tomasi (Kanade-Tomasi) corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as the smaller eigenvalue of A::

        ((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    response : ndarray
        Shi-Tomasi response image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_shi_tomasi, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corner_peaks(corner_shi_tomasi(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])

    """

    Arr, Arc, Acc = structure_tensor(image, sigma, order='rc')

    # minimum eigenvalue of A
    response = ((Arr + Acc) - np.sqrt((Arr - Acc) ** 2 + 4 * Arc**2)) / 2

    return response


def corner_foerstner(image, sigma=1):
    """Compute Foerstner corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as::

        w = det(A) / trace(A)           (size of error ellipse)
        q = 4 * det(A) / trace(A)**2    (roundness of error ellipse)

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    w : ndarray
        Error ellipse sizes.
    q : ndarray
        Roundness of error ellipse.

    References
    ----------
    .. [1] Förstner, W., & Gülch, E. (1987, June). A fast operator for
           detection and precise location of distinct points, corners and
           centres of circular features. In Proc. ISPRS intercommission
           conference on fast processing of photogrammetric data (pp. 281-305).
           https://cseweb.ucsd.edu/classes/sp02/cse252/foerstner/foerstner.pdf
    .. [2] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_foerstner, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> w, q = corner_foerstner(square)
    >>> accuracy_thresh = 0.5
    >>> roundness_thresh = 0.3
    >>> foerstner = (q > roundness_thresh) * (w > accuracy_thresh) * w
    >>> corner_peaks(foerstner, min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])

    """

    Arr, Arc, Acc = structure_tensor(image, sigma, order='rc')

    # determinant
    detA = Arr * Acc - Arc**2
    # trace
    traceA = Arr + Acc

    w = np.zeros_like(image, dtype=detA.dtype)
    q = np.zeros_like(w)

    mask = traceA != 0

    w[mask] = detA[mask] / traceA[mask]
    q[mask] = 4 * detA[mask] / traceA[mask] ** 2

    return w, q


def corner_fast(image, n=12, threshold=0.15):
    """Extract FAST corners for a given image.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    n : int, optional
        Minimum number of consecutive pixels out of 16 pixels on the circle
        that should all be either brighter or darker w.r.t. the test pixel.
        A point c on the circle is darker w.r.t test pixel p if
        `Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`. It also
        stands for the n in the `FAST-n` corner detector.
    threshold : float, optional
        Threshold used in deciding whether the pixels on the circle are
        brighter, darker or similar w.r.t. the test pixel. Decrease the
        threshold when more corners are desired and vice-versa.

    Returns
    -------
    response : ndarray
        FAST corner response image.

    References
    ----------
    .. [1] Rosten, E., & Drummond, T. (2006, May). Machine learning for
           high-speed corner detection. In European conference on computer
           vision (pp. 430-443). Springer, Berlin, Heidelberg.
           :DOI:`10.1007/11744023_34`
           http://www.edwardrosten.com/work/rosten_2006_machine.pdf
    .. [2] Wikipedia, "Features from accelerated segment test",
           https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test

    Examples
    --------
    >>> from skimage.feature import corner_fast, corner_peaks
    >>> square = np.zeros((12, 12))
    >>> square[3:9, 3:9] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corner_peaks(corner_fast(square, 9), min_distance=1)
    array([[3, 3],
           [3, 8],
           [8, 3],
           [8, 8]])

    """
    image = _prepare_grayscale_input_2D(image)

    image = np.ascontiguousarray(image)
    response = _corner_fast(image, n, threshold)
    return response


def corner_subpix(image, corners, window_size=11, alpha=0.99):
    """Determine subpixel position of corners.

    A statistical test decides whether the corner is defined as the
    intersection of two edges or a single peak. Depending on the classification
    result, the subpixel corner location is determined based on the local
    covariance of the grey-values. If the significance level for either
    statistical test is not sufficient, the corner cannot be classified, and
    the output subpixel position is set to NaN.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    corners : (K, 2) ndarray
        Corner coordinates `(row, col)`.
    window_size : int, optional
        Search window size for subpixel estimation.
    alpha : float, optional
        Significance level for corner classification.

    Returns
    -------
    positions : (K, 2) ndarray
        Subpixel corner positions. NaN for "not classified" corners.

    References
    ----------
    .. [1] Förstner, W., & Gülch, E. (1987, June). A fast operator for
           detection and precise location of distinct points, corners and
           centres of circular features. In Proc. ISPRS intercommission
           conference on fast processing of photogrammetric data (pp. 281-305).
           https://cseweb.ucsd.edu/classes/sp02/cse252/foerstner/foerstner.pdf
    .. [2] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks, corner_subpix
    >>> img = np.zeros((10, 10))
    >>> img[:5, :5] = 1
    >>> img[5:, 5:] = 1
    >>> img.astype(int)
    array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
    >>> coords = corner_peaks(corner_harris(img), min_distance=2)
    >>> coords_subpix = corner_subpix(img, coords, window_size=7)
    >>> coords_subpix
    array([[4.5, 4.5]])

    """

    # window extent in one direction
    wext = (window_size - 1) // 2

    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    image = np.pad(image, pad_width=wext, mode='constant', constant_values=0)

    # add pad width, make sure to not modify the input values in-place
    corners = safe_as_int(corners + wext)

    # normal equation arrays
    N_dot = np.zeros((2, 2), dtype=float_dtype)
    N_edge = np.zeros((2, 2), dtype=float_dtype)
    b_dot = np.zeros((2,), dtype=float_dtype)
    b_edge = np.zeros((2,), dtype=float_dtype)

    # critical statistical test values
    redundancy = window_size**2 - 2
    t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
    t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)

    # coordinates of pixels within window
    y, x = np.mgrid[-wext : wext + 1, -wext : wext + 1]

    corners_subpix = np.zeros_like(corners, dtype=float_dtype)

    for i, (y0, x0) in enumerate(corners):
        # crop window around corner + border for sobel operator
        miny = y0 - wext - 1
        maxy = y0 + wext + 2
        minx = x0 - wext - 1
        maxx = x0 + wext + 2
        window = image[miny:maxy, minx:maxx]

        winy, winx = _compute_derivatives(window, mode='constant', cval=0)

        # compute gradient squares and remove border
        winx_winx = (winx * winx)[1:-1, 1:-1]
        winx_winy = (winx * winy)[1:-1, 1:-1]
        winy_winy = (winy * winy)[1:-1, 1:-1]

        # sum of squared differences (mean instead of gaussian filter)
        Axx = np.sum(winx_winx)
        Axy = np.sum(winx_winy)
        Ayy = np.sum(winy_winy)

        # sum of squared differences weighted with coordinates
        # (mean instead of gaussian filter)
        bxx_x = np.sum(winx_winx * x)
        bxx_y = np.sum(winx_winx * y)
        bxy_x = np.sum(winx_winy * x)
        bxy_y = np.sum(winx_winy * y)
        byy_x = np.sum(winy_winy * x)
        byy_y = np.sum(winy_winy * y)

        # normal equations for subpixel position
        N_dot[0, 0] = Axx
        N_dot[0, 1] = N_dot[1, 0] = -Axy
        N_dot[1, 1] = Ayy

        N_edge[0, 0] = Ayy
        N_edge[0, 1] = N_edge[1, 0] = Axy
        N_edge[1, 1] = Axx

        b_dot[:] = bxx_y - bxy_x, byy_x - bxy_y
        b_edge[:] = byy_y + bxy_x, bxx_x + bxy_y

        # estimated positions
        try:
            est_dot = np.linalg.solve(N_dot, b_dot)
            est_edge = np.linalg.solve(N_edge, b_edge)
        except np.linalg.LinAlgError:
            # if image is constant the system is singular
            corners_subpix[i, :] = np.nan, np.nan
            continue

        # residuals
        ry_dot = y - est_dot[0]
        rx_dot = x - est_dot[1]
        ry_edge = y - est_edge[0]
        rx_edge = x - est_edge[1]
        # squared residuals
        rxx_dot = rx_dot * rx_dot
        rxy_dot = rx_dot * ry_dot
        ryy_dot = ry_dot * ry_dot
        rxx_edge = rx_edge * rx_edge
        rxy_edge = rx_edge * ry_edge
        ryy_edge = ry_edge * ry_edge

        # determine corner class (dot or edge)
        # variance for different models
        var_dot = np.sum(
            winx_winx * ryy_dot - 2 * winx_winy * rxy_dot + winy_winy * rxx_dot
        )
        var_edge = np.sum(
            winy_winy * ryy_edge + 2 * winx_winy * rxy_edge + winx_winx * rxx_edge
        )

        # test value (F-distributed)
        if var_dot < np.spacing(1) and var_edge < np.spacing(1):
            t = np.nan
        elif var_dot == 0:
            t = np.inf
        else:
            t = var_edge / var_dot

        # 1 for edge, -1 for dot, 0 for "not classified"
        corner_class = int(t < t_crit_edge) - int(t > t_crit_dot)

        if corner_class == -1:
            corners_subpix[i, :] = y0 + est_dot[0], x0 + est_dot[1]
        elif corner_class == 0:
            corners_subpix[i, :] = np.nan, np.nan
        elif corner_class == 1:
            corners_subpix[i, :] = y0 + est_edge[0], x0 + est_edge[1]

    # subtract pad width
    corners_subpix -= wext

    return corners_subpix


def corner_peaks(
    image,
    min_distance=1,
    threshold_abs=None,
    threshold_rel=None,
    exclude_border=True,
    indices=True,
    num_peaks=np.inf,
    footprint=None,
    labels=None,
    *,
    num_peaks_per_label=np.inf,
    p_norm=np.inf,
):
    """Find peaks in corner measure response image.

    This differs from `skimage.feature.peak_local_max` in that it suppresses
    multiple connected peaks with the same accumulator value.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    min_distance : int, optional
        The minimal allowed distance separating peaks.
    * : *
        See :py:meth:`skimage.feature.peak_local_max`.
    p_norm : float
        Which Minkowski p-norm to use. Should be in the range [1, inf].
        A finite large p may cause a ValueError if overflow can occur.
        ``inf`` corresponds to the Chebyshev distance and 2 to the
        Euclidean distance.

    Returns
    -------
    output : ndarray or ndarray of bools

        * If `indices = True`  : (row, column, ...) coordinates of peaks.
        * If `indices = False` : Boolean array shaped like `image`, with peaks
          represented by True values.

    See also
    --------
    skimage.feature.peak_local_max

    Notes
    -----
    .. versionchanged:: 0.18
        The default value of `threshold_rel` has changed to None, which
        corresponds to letting `skimage.feature.peak_local_max` decide on the
        default. This is equivalent to `threshold_rel=0`.

    The `num_peaks` limit is applied before suppression of connected peaks.
    To limit the number of peaks after suppression, set `num_peaks=np.inf` and
    post-process the output of this function.

    Examples
    --------
    >>> from skimage.feature import peak_local_max
    >>> response = np.zeros((5, 5))
    >>> response[2:4, 2:4] = 1
    >>> response
    array([[0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0.],
           [0., 0., 1., 1., 0.],
           [0., 0., 1., 1., 0.],
           [0., 0., 0., 0., 0.]])
    >>> peak_local_max(response)
    array([[2, 2],
           [2, 3],
           [3, 2],
           [3, 3]])
    >>> corner_peaks(response)
    array([[2, 2]])

    """
    if np.isinf(num_peaks):
        num_peaks = None

    # Get the coordinates of the detected peaks
    coords = peak_local_max(
        image,
        min_distance=min_distance,
        threshold_abs=threshold_abs,
        threshold_rel=threshold_rel,
        exclude_border=exclude_border,
        num_peaks=np.inf,
        footprint=footprint,
        labels=labels,
        num_peaks_per_label=num_peaks_per_label,
    )

    if len(coords):
        # Use KDtree to find the peaks that are too close to each other
        tree = spatial.cKDTree(coords)

        rejected_peaks_indices = set()
        for idx, point in enumerate(coords):
            if idx not in rejected_peaks_indices:
                candidates = tree.query_ball_point(point, r=min_distance, p=p_norm)
                candidates.remove(idx)
                rejected_peaks_indices.update(candidates)

        # Remove the peaks that are too close to each other
        coords = np.delete(coords, tuple(rejected_peaks_indices), axis=0)[:num_peaks]

    if indices:
        return coords

    peaks = np.zeros_like(image, dtype=bool)
    peaks[tuple(coords.T)] = True

    return peaks


def corner_moravec(image, window_size=1):
    """Compute Moravec corner measure response image.

    This is one of the simplest corner detectors and is comparatively fast but
    has several limitations (e.g. not rotation invariant).

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    window_size : int, optional
        Window size.

    Returns
    -------
    response : ndarray
        Moravec response image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_moravec
    >>> square = np.zeros([7, 7])
    >>> square[3, 3] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> corner_moravec(square).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 2, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    """
    image = img_as_float(image)
    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)
    return _corner_moravec(np.ascontiguousarray(image), window_size)


def corner_orientations(image, corners, mask):
    """Compute the orientation of corners.

    The orientation of corners is computed using the first order central
    moment, i.e. the center of mass approach. The corner orientation is the
    angle of the vector from the corner coordinate to the intensity centroid
    of the local neighborhood around the corner, as computed from the first
    order central moment.

    Parameters
    ----------
    image : (M, N) array
        Input grayscale image.
    corners : (K, 2) array
        Corner coordinates as ``(row, col)``.
    mask : 2D array
        Mask defining the local neighborhood of the corner used for the
        calculation of the central moment.

    Returns
    -------
    orientations : (K, 1) array
        Orientations of corners in the range [-pi, pi].

    References
    ----------
    .. [1] Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary Bradski
          "ORB : An efficient alternative to SIFT and SURF"
          http://www.vision.cs.chubu.ac.jp/CV-R/pdf/Rublee_iccv2011.pdf
    .. [2] Paul L. Rosin, "Measuring Corner Properties"
          http://users.cs.cf.ac.uk/Paul.Rosin/corner2.pdf

    Examples
    --------
    >>> from skimage.morphology import octagon
    >>> from skimage.feature import (corner_fast, corner_peaks,
    ...                              corner_orientations)
    >>> square = np.zeros((12, 12))
    >>> square[3:9, 3:9] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corners = corner_peaks(corner_fast(square, 9), min_distance=1)
    >>> corners
    array([[3, 3],
           [3, 8],
           [8, 3],
           [8, 8]])
    >>> orientations = corner_orientations(square, corners, octagon(3, 2))
    >>> np.rad2deg(orientations)
    array([  45.,  135.,  -45., -135.])

    """
    image = _prepare_grayscale_input_2D(image)
    return _corner_orientations(image, corners, mask)