/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
/*
 * Fallback for hosts without 64-bit atomics: s->lock packs a writer flag
 * in bit 0 and a reader count in the remaining bits (one reader == 2).
 *
 * Acquire the read side: register this reader, then wait out any writer
 * that currently holds bit 0.
 */
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us.  Adding 2
     * first means stat64_wrtrylock() (which needs lock == 0) fails for
     * the whole time we are registered.
     */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it. */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

/*
 * Release the read side: drop this reader's reference (readers are
 * counted in units of 2, leaving bit 0 for the writer flag).
 */
static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}

/*
 * Try once to acquire the write side.  Succeeds only when there are no
 * readers (counted in units of 2) and no other writer (bit 0).
 *
 * Returns true if the write lock was acquired; the caller retries on
 * false rather than spinning here.
 */
static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

/*
 * Release the write side: clear bit 0 (lock was 1, becomes 0), letting
 * readers spinning in stat64_rdlock() proceed.
 */
static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}

/*
 * Read the full 64-bit value as two 32-bit halves under the read lock.
 *
 * The const cast is needed because even a read mutates the lock word.
 */
uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}

/*
 * Store a 64-bit value as two 32-bit halves.  Unlike the try-lock slow
 * paths below, this spins here until the write lock is acquired.
 */
void stat64_set(Stat64 *s, uint64_t val)
{
    while (!stat64_wrtrylock(s)) {
        cpu_relax();
    }

    /* Readers also take the lock, so the order of these two stores
     * does not matter while we hold it.
     */
    qatomic_set(&s->high, val >> 32);
    qatomic_set(&s->low, val);
    stat64_wrunlock(s);
}

/*
 * Slow-path add of a (low, high) pair into *s, propagating the carry
 * from the low word into the high word.
 *
 * Makes a single attempt at the write lock; returns false (after a
 * cpu_relax()) so the caller can retry, true when the add was done.
 */
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update. By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    /* Unsigned wraparound test: (old + low) < old exactly when the
     * 32-bit addition carried.
     */
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}

/*
 * Slow-path minimum: store @value into *s if it is below the current
 * contents.
 *
 * Single try-lock attempt; returns false so the caller retries, true
 * once the comparison (and possible store) has been performed.
 */
bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low. The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}

/*
 * Slow-path maximum: store @value into *s if it is above the current
 * contents.  Mirror image of stat64_min_slow().
 *
 * Single try-lock attempt; returns false so the caller retries, true
 * once the comparison (and possible store) has been performed.
 */
bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low. The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif