Diffstat (limited to 'contrib/qemu/include')
59 files changed, 9340 insertions, 0 deletions
diff --git a/contrib/qemu/include/block/aio.h b/contrib/qemu/include/block/aio.h new file mode 100644 index 00000000000..183679374fa --- /dev/null +++ b/contrib/qemu/include/block/aio.h @@ -0,0 +1,247 @@ +/* + * QEMU aio implementation + * + * Copyright IBM, Corp. 2008 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_AIO_H +#define QEMU_AIO_H + +#include "qemu-common.h" +#include "qemu/queue.h" +#include "qemu/event_notifier.h" + +typedef struct BlockDriverAIOCB BlockDriverAIOCB; +typedef void BlockDriverCompletionFunc(void *opaque, int ret); + +typedef struct AIOCBInfo { + void (*cancel)(BlockDriverAIOCB *acb); + size_t aiocb_size; +} AIOCBInfo; + +struct BlockDriverAIOCB { + const AIOCBInfo *aiocb_info; + BlockDriverState *bs; + BlockDriverCompletionFunc *cb; + void *opaque; +}; + +void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, + BlockDriverCompletionFunc *cb, void *opaque); +void qemu_aio_release(void *p); + +typedef struct AioHandler AioHandler; +typedef void QEMUBHFunc(void *opaque); +typedef void IOHandler(void *opaque); + +typedef struct AioContext { + GSource source; + + /* The list of registered AIO handlers */ + QLIST_HEAD(, AioHandler) aio_handlers; + + /* This is a simple lock used to protect the aio_handlers list. + * Specifically, it's used to ensure that no callbacks are removed while + * we're walking and dispatching callbacks. + */ + int walking_handlers; + + /* Anchor of the list of Bottom Halves belonging to the context */ + struct QEMUBH *first_bh; + + /* A simple lock used to protect the first_bh list, and ensure that + * no callbacks are removed while we're walking and dispatching callbacks. + */ + int walking_bh; + + /* Used for aio_notify. */ + EventNotifier notifier; + + /* GPollFDs for aio_poll() */ + GArray *pollfds; + + /* Thread pool for performing work and receiving completion callbacks */ + struct ThreadPool *thread_pool; +} AioContext; + +/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ +typedef int (AioFlushEventNotifierHandler)(EventNotifier *e); + +/** + * aio_context_new: Allocate a new AioContext. + * + * AioContext provide a mini event-loop that can be waited on synchronously. + * They also provide bottom halves, a service to execute a piece of code + * as soon as possible. + */ +AioContext *aio_context_new(void); + +/** + * aio_context_ref: + * @ctx: The AioContext to operate on. + * + * Add a reference to an AioContext. + */ +void aio_context_ref(AioContext *ctx); + +/** + * aio_context_unref: + * @ctx: The AioContext to operate on. + * + * Drop a reference to an AioContext. + */ +void aio_context_unref(AioContext *ctx); + +/** + * aio_bh_new: Allocate a new bottom half structure. + * + * Bottom halves are lightweight callbacks whose invocation is guaranteed + * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure + * is opaque and must be allocated prior to its use. + */ +QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque); + +/** + * aio_notify: Force processing of pending events. + * + * Similar to signaling a condition variable, aio_notify forces + * aio_wait to exit, so that the next call will re-examine pending events. + * The caller of aio_notify will usually call aio_wait again very soon, + * or go through another iteration of the GLib main loop. 
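/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * allocating and scheduling a bottom half with the aio_bh_new() /
 * qemu_bh_schedule() API declared in this header. The callback, counter and
 * helper names are made up for the example.
 */
#include "block/aio.h"

static void example_bh_cb(void *opaque)
{
    int *counter = opaque;   /* invoked from the event loop as soon as possible */

    (*counter)++;
}

static QEMUBH *example_schedule_bh(AioContext *ctx, int *counter)
{
    QEMUBH *bh = aio_bh_new(ctx, example_bh_cb, counter);

    qemu_bh_schedule(bh);    /* wait-free, thread- and signal-safe */
    return bh;               /* caller frees it later with qemu_bh_delete() */
}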
Hence, aio_notify + * also has the side effect of recalculating the sets of file descriptors + * that the main loop waits for. + * + * Calling aio_notify is rarely necessary, because for example scheduling + * a bottom half calls it already. + */ +void aio_notify(AioContext *ctx); + +/** + * aio_bh_poll: Poll bottom halves for an AioContext. + * + * These are internal functions used by the QEMU main loop. + */ +int aio_bh_poll(AioContext *ctx); + +/** + * qemu_bh_schedule: Schedule a bottom half. + * + * Scheduling a bottom half interrupts the main loop and causes the + * execution of the callback that was passed to qemu_bh_new. + * + * Bottom halves that are scheduled from a bottom half handler are instantly + * invoked. This can create an infinite loop if a bottom half handler + * schedules itself. + * + * @bh: The bottom half to be scheduled. + */ +void qemu_bh_schedule(QEMUBH *bh); + +/** + * qemu_bh_cancel: Cancel execution of a bottom half. + * + * Canceling execution of a bottom half undoes the effect of calls to + * qemu_bh_schedule without freeing its resources yet. While cancellation + * itself is also wait-free and thread-safe, it can of course race with the + * loop that executes bottom halves unless you are holding the iothread + * mutex. This makes it mostly useless if you are not holding the mutex. + * + * @bh: The bottom half to be canceled. + */ +void qemu_bh_cancel(QEMUBH *bh); + +/** + *qemu_bh_delete: Cancel execution of a bottom half and free its resources. + * + * Deleting a bottom half frees the memory that was allocated for it by + * qemu_bh_new. It also implies canceling the bottom half if it was + * scheduled. + * + * @bh: The bottom half to be deleted. + */ +void qemu_bh_delete(QEMUBH *bh); + +/* Return whether there are any pending callbacks from the GSource + * attached to the AioContext. + * + * This is used internally in the implementation of the GSource. + */ +bool aio_pending(AioContext *ctx); + +/* Progress in completing AIO work to occur. This can issue new pending + * aio as a result of executing I/O completion or bh callbacks. + * + * If there is no pending AIO operation or completion (bottom half), + * return false. If there are pending AIO operations of bottom halves, + * return true. + * + * If there are no pending bottom halves, but there are pending AIO + * operations, it may not be possible to make any progress without + * blocking. If @blocking is true, this function will wait until one + * or more AIO events have completed, to ensure something has moved + * before returning. + */ +bool aio_poll(AioContext *ctx, bool blocking); + +#ifdef CONFIG_POSIX +/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ +typedef int (AioFlushHandler)(void *opaque); + +/* Register a file descriptor and associated callbacks. Behaves very similarly + * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will + * be invoked when using qemu_aio_wait(). + * + * Code that invokes AIO completion functions should rely on this function + * instead of qemu_set_fd_handler[2]. + */ +void aio_set_fd_handler(AioContext *ctx, + int fd, + IOHandler *io_read, + IOHandler *io_write, + AioFlushHandler *io_flush, + void *opaque); +#endif + +/* Register an event notifier and associated callbacks. Behaves very similarly + * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks + * will be invoked when using qemu_aio_wait(). 
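/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * synchronously driving an AioContext with aio_poll() until a completion
 * callback sets the hypothetical 'done' flag.
 */
#include "block/aio.h"

static void example_wait_for_done(AioContext *ctx, bool *done)
{
    while (!*done) {
        aio_poll(ctx, true);   /* blocking=true: sleep until an event has run */
    }
}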
+ * + * Code that invokes AIO completion functions should rely on this function + * instead of event_notifier_set_handler. + */ +void aio_set_event_notifier(AioContext *ctx, + EventNotifier *notifier, + EventNotifierHandler *io_read, + AioFlushEventNotifierHandler *io_flush); + +/* Return a GSource that lets the main loop poll the file descriptors attached + * to this AioContext. + */ +GSource *aio_get_g_source(AioContext *ctx); + +/* Return the ThreadPool bound to this AioContext */ +struct ThreadPool *aio_get_thread_pool(AioContext *ctx); + +/* Functions to operate on the main QEMU AioContext. */ + +bool qemu_aio_wait(void); +void qemu_aio_set_event_notifier(EventNotifier *notifier, + EventNotifierHandler *io_read, + AioFlushEventNotifierHandler *io_flush); + +#ifdef CONFIG_POSIX +void qemu_aio_set_fd_handler(int fd, + IOHandler *io_read, + IOHandler *io_write, + AioFlushHandler *io_flush, + void *opaque); +#endif + +#endif diff --git a/contrib/qemu/include/block/block.h b/contrib/qemu/include/block/block.h new file mode 100644 index 00000000000..b6b9014a9ce --- /dev/null +++ b/contrib/qemu/include/block/block.h @@ -0,0 +1,443 @@ +#ifndef BLOCK_H +#define BLOCK_H + +#include "block/aio.h" +#include "qemu-common.h" +#include "qemu/option.h" +#include "block/coroutine.h" +#include "qapi/qmp/qobject.h" +#include "qapi-types.h" + +/* block.c */ +typedef struct BlockDriver BlockDriver; +typedef struct BlockJob BlockJob; + +typedef struct BlockDriverInfo { + /* in bytes, 0 if irrelevant */ + int cluster_size; + /* offset at which the VM state can be saved (0 if not possible) */ + int64_t vm_state_offset; + bool is_dirty; +} BlockDriverInfo; + +typedef struct BlockFragInfo { + uint64_t allocated_clusters; + uint64_t total_clusters; + uint64_t fragmented_clusters; + uint64_t compressed_clusters; +} BlockFragInfo; + +/* Callbacks for block device models */ +typedef struct BlockDevOps { + /* + * Runs when virtual media changed (monitor commands eject, change) + * Argument load is true on load and false on eject. + * Beware: doesn't run when a host device's physical media + * changes. Sure would be useful if it did. + * Device models with removable media must implement this callback. + */ + void (*change_media_cb)(void *opaque, bool load); + /* + * Runs when an eject request is issued from the monitor, the tray + * is closed, and the medium is locked. + * Device models that do not implement is_medium_locked will not need + * this callback. Device models that can lock the medium or tray might + * want to implement the callback and unlock the tray when "force" is + * true, even if they do not support eject requests. + */ + void (*eject_request_cb)(void *opaque, bool force); + /* + * Is the virtual tray open? + * Device models implement this only when the device has a tray. + */ + bool (*is_tray_open)(void *opaque); + /* + * Is the virtual medium locked into the device? + * Device models implement this only when device has such a lock. + */ + bool (*is_medium_locked)(void *opaque); + /* + * Runs when the size changed (e.g. 
monitor command block_resize) + */ + void (*resize_cb)(void *opaque); +} BlockDevOps; + +#define BDRV_O_RDWR 0x0002 +#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */ +#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */ +#define BDRV_O_CACHE_WB 0x0040 /* use write-back caching */ +#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the thread pool */ +#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */ +#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */ +#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */ +#define BDRV_O_INCOMING 0x0800 /* consistency hint for incoming migration */ +#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */ +#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */ +#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */ + +#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH) + +#define BDRV_SECTOR_BITS 9 +#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS) +#define BDRV_SECTOR_MASK ~(BDRV_SECTOR_SIZE - 1) + +typedef enum { + BDRV_ACTION_REPORT, BDRV_ACTION_IGNORE, BDRV_ACTION_STOP +} BlockErrorAction; + +typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue; + +typedef struct BDRVReopenState { + BlockDriverState *bs; + int flags; + void *opaque; +} BDRVReopenState; + + +void bdrv_iostatus_enable(BlockDriverState *bs); +void bdrv_iostatus_reset(BlockDriverState *bs); +void bdrv_iostatus_disable(BlockDriverState *bs); +bool bdrv_iostatus_is_enabled(const BlockDriverState *bs); +void bdrv_iostatus_set_err(BlockDriverState *bs, int error); +void bdrv_info_print(Monitor *mon, const QObject *data); +void bdrv_info(Monitor *mon, QObject **ret_data); +void bdrv_stats_print(Monitor *mon, const QObject *data); +void bdrv_info_stats(Monitor *mon, QObject **ret_data); + +/* disk I/O throttling */ +void bdrv_io_limits_enable(BlockDriverState *bs); +void bdrv_io_limits_disable(BlockDriverState *bs); +bool bdrv_io_limits_enabled(BlockDriverState *bs); + +void bdrv_init(void); +void bdrv_init_with_whitelist(void); +BlockDriver *bdrv_find_protocol(const char *filename, + bool allow_protocol_prefix); +BlockDriver *bdrv_find_format(const char *format_name); +BlockDriver *bdrv_find_whitelisted_format(const char *format_name, + bool readonly); +int bdrv_create(BlockDriver *drv, const char* filename, + QEMUOptionParameter *options); +int bdrv_create_file(const char* filename, QEMUOptionParameter *options); +BlockDriverState *bdrv_new(const char *device_name); +void bdrv_make_anon(BlockDriverState *bs); +void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old); +void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top); +void bdrv_delete(BlockDriverState *bs); +int bdrv_parse_cache_flags(const char *mode, int *flags); +int bdrv_parse_discard_flags(const char *mode, int *flags); +int bdrv_file_open(BlockDriverState **pbs, const char *filename, + QDict *options, int flags); +int bdrv_open_backing_file(BlockDriverState *bs, QDict *options); +int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options, + int flags, BlockDriver *drv); +BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, + BlockDriverState *bs, int flags); +int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); +int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp); +int bdrv_reopen_prepare(BDRVReopenState *reopen_state, + 
BlockReopenQueue *queue, Error **errp); +void bdrv_reopen_commit(BDRVReopenState *reopen_state); +void bdrv_reopen_abort(BDRVReopenState *reopen_state); +void bdrv_close(BlockDriverState *bs); +void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify); +int bdrv_attach_dev(BlockDriverState *bs, void *dev); +void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev); +void bdrv_detach_dev(BlockDriverState *bs, void *dev); +void *bdrv_get_attached_dev(BlockDriverState *bs); +void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops, + void *opaque); +void bdrv_dev_eject_request(BlockDriverState *bs, bool force); +bool bdrv_dev_has_removable_media(BlockDriverState *bs); +bool bdrv_dev_is_tray_open(BlockDriverState *bs); +bool bdrv_dev_is_medium_locked(BlockDriverState *bs); +int bdrv_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors); +int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors); +int bdrv_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors); +int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov); +int bdrv_pread(BlockDriverState *bs, int64_t offset, + void *buf, int count); +int bdrv_pwrite(BlockDriverState *bs, int64_t offset, + const void *buf, int count); +int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov); +int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, + const void *buf, int count); +int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov); +int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); +int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov); +/* + * Efficiently zero a region of the disk image. Note that this is a regular + * I/O request like read or write and should have a reasonable size. This + * function is not suitable for zeroing the entire image in a single request + * because it may allocate memory for the entire region. 
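/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * synchronous byte-granularity I/O with bdrv_pread() and bdrv_pwrite_sync()
 * as declared above. Offsets and lengths are arbitrary example values.
 */
#include "block/block.h"

static int example_copy_region(BlockDriverState *bs, uint8_t *buf, int len)
{
    int ret = bdrv_pread(bs, 0, buf, len);   /* returns negative errno on error */

    if (ret < 0) {
        return ret;
    }
    /* write the same bytes at a 1 MiB offset and flush them through the cache */
    return bdrv_pwrite_sync(bs, 1024 * 1024, buf, len);
}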
+ */ +int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num, + int nb_sectors); +int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, int *pnum); +int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, + BlockDriverState *base, + int64_t sector_num, + int nb_sectors, int *pnum); +BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, + const char *backing_file); +int bdrv_get_backing_file_depth(BlockDriverState *bs); +int bdrv_truncate(BlockDriverState *bs, int64_t offset); +int64_t bdrv_getlength(BlockDriverState *bs); +int64_t bdrv_get_allocated_file_size(BlockDriverState *bs); +void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr); +int bdrv_commit(BlockDriverState *bs); +int bdrv_commit_all(void); +int bdrv_change_backing_file(BlockDriverState *bs, + const char *backing_file, const char *backing_fmt); +void bdrv_register(BlockDriver *bdrv); +int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top, + BlockDriverState *base); +BlockDriverState *bdrv_find_overlay(BlockDriverState *active, + BlockDriverState *bs); +BlockDriverState *bdrv_find_base(BlockDriverState *bs); + + +typedef struct BdrvCheckResult { + int corruptions; + int leaks; + int check_errors; + int corruptions_fixed; + int leaks_fixed; + int64_t image_end_offset; + BlockFragInfo bfi; +} BdrvCheckResult; + +typedef enum { + BDRV_FIX_LEAKS = 1, + BDRV_FIX_ERRORS = 2, +} BdrvCheckMode; + +int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); + +/* async block I/O */ +typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector, + int sector_num); +BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, + QEMUIOVector *iov, int nb_sectors, + BlockDriverCompletionFunc *cb, void *opaque); +BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, + QEMUIOVector *iov, int nb_sectors, + BlockDriverCompletionFunc *cb, void *opaque); +BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, + BlockDriverCompletionFunc *cb, void *opaque); +BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + BlockDriverCompletionFunc *cb, void *opaque); +void bdrv_aio_cancel(BlockDriverAIOCB *acb); + +typedef struct BlockRequest { + /* Fields to be filled by multiwrite caller */ + int64_t sector; + int nb_sectors; + QEMUIOVector *qiov; + BlockDriverCompletionFunc *cb; + void *opaque; + + /* Filled by multiwrite implementation */ + int error; +} BlockRequest; + +int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, + int num_reqs); + +/* sg packet commands */ +int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf); +BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, + unsigned long int req, void *buf, + BlockDriverCompletionFunc *cb, void *opaque); + +/* Invalidate any cached metadata used by image formats */ +void bdrv_invalidate_cache(BlockDriverState *bs); +void bdrv_invalidate_cache_all(void); + +void bdrv_clear_incoming_migration_all(void); + +/* Ensure contents are flushed to disk. 
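/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * submitting an asynchronous read with bdrv_aio_readv() and noting completion
 * from the callback. The qiov is assumed to already describe 8 sectors'
 * worth of buffer space (qemu_iovec_init() and friends).
 */
#include "block/block.h"

static void example_aio_read_cb(void *opaque, int ret)
{
    bool *done = opaque;   /* ret is 0 on success, a negative errno on error */

    *done = true;
}

static void example_submit_aio_read(BlockDriverState *bs, QEMUIOVector *qiov,
                                    bool *done)
{
    /* read 8 sectors starting at sector 0; the callback runs from the event loop */
    bdrv_aio_readv(bs, 0, qiov, 8, example_aio_read_cb, done);
}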
*/ +int bdrv_flush(BlockDriverState *bs); +int coroutine_fn bdrv_co_flush(BlockDriverState *bs); +int bdrv_flush_all(void); +void bdrv_close_all(void); +void bdrv_drain_all(void); + +int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); +int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); +int bdrv_has_zero_init_1(BlockDriverState *bs); +int bdrv_has_zero_init(BlockDriverState *bs); +int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, + int *pnum); +int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base, + int64_t sector_num, int nb_sectors, int *pnum); + +void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error, + BlockdevOnError on_write_error); +BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read); +BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error); +void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action, + bool is_read, int error); +int bdrv_is_read_only(BlockDriverState *bs); +int bdrv_is_sg(BlockDriverState *bs); +int bdrv_enable_write_cache(BlockDriverState *bs); +void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce); +int bdrv_is_inserted(BlockDriverState *bs); +int bdrv_media_changed(BlockDriverState *bs); +void bdrv_lock_medium(BlockDriverState *bs, bool locked); +void bdrv_eject(BlockDriverState *bs, bool eject_flag); +const char *bdrv_get_format_name(BlockDriverState *bs); +BlockDriverState *bdrv_find(const char *name); +BlockDriverState *bdrv_next(BlockDriverState *bs); +void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), + void *opaque); +int bdrv_is_encrypted(BlockDriverState *bs); +int bdrv_key_required(BlockDriverState *bs); +int bdrv_set_key(BlockDriverState *bs, const char *key); +int bdrv_query_missing_keys(void); +void bdrv_iterate_format(void (*it)(void *opaque, const char *name), + void *opaque); +const char *bdrv_get_device_name(BlockDriverState *bs); +int bdrv_get_flags(BlockDriverState *bs); +int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors); +int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); +void bdrv_round_to_clusters(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + int64_t *cluster_sector_num, + int *cluster_nb_sectors); + +const char *bdrv_get_encrypted_filename(BlockDriverState *bs); +void bdrv_get_backing_filename(BlockDriverState *bs, + char *filename, int filename_size); +void bdrv_get_full_backing_filename(BlockDriverState *bs, + char *dest, size_t sz); +int bdrv_is_snapshot(BlockDriverState *bs); + +int path_is_absolute(const char *path); +void path_combine(char *dest, int dest_size, + const char *base_path, + const char *filename); + +int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); +int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, + int64_t pos, int size); + +int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, + int64_t pos, int size); + +void bdrv_img_create(const char *filename, const char *fmt, + const char *base_filename, const char *base_fmt, + char *options, uint64_t img_size, int flags, + Error **errp, bool quiet); + +void bdrv_set_buffer_alignment(BlockDriverState *bs, int align); +void *qemu_blockalign(BlockDriverState *bs, size_t size); +bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov); + +struct HBitmapIter; +void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity); +int 
bdrv_get_dirty(BlockDriverState *bs, int64_t sector); +void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors); +void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors); +void bdrv_dirty_iter_init(BlockDriverState *bs, struct HBitmapIter *hbi); +int64_t bdrv_get_dirty_count(BlockDriverState *bs); + +void bdrv_enable_copy_on_read(BlockDriverState *bs); +void bdrv_disable_copy_on_read(BlockDriverState *bs); + +void bdrv_set_in_use(BlockDriverState *bs, int in_use); +int bdrv_in_use(BlockDriverState *bs); + +#ifdef CONFIG_LINUX_AIO +int raw_get_aio_fd(BlockDriverState *bs); +#else +static inline int raw_get_aio_fd(BlockDriverState *bs) +{ + return -ENOTSUP; +} +#endif + +enum BlockAcctType { + BDRV_ACCT_READ, + BDRV_ACCT_WRITE, + BDRV_ACCT_FLUSH, + BDRV_MAX_IOTYPE, +}; + +typedef struct BlockAcctCookie { + int64_t bytes; + int64_t start_time_ns; + enum BlockAcctType type; +} BlockAcctCookie; + +void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, + int64_t bytes, enum BlockAcctType type); +void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie); + +typedef enum { + BLKDBG_L1_UPDATE, + + BLKDBG_L1_GROW_ALLOC_TABLE, + BLKDBG_L1_GROW_WRITE_TABLE, + BLKDBG_L1_GROW_ACTIVATE_TABLE, + + BLKDBG_L2_LOAD, + BLKDBG_L2_UPDATE, + BLKDBG_L2_UPDATE_COMPRESSED, + BLKDBG_L2_ALLOC_COW_READ, + BLKDBG_L2_ALLOC_WRITE, + + BLKDBG_READ_AIO, + BLKDBG_READ_BACKING_AIO, + BLKDBG_READ_COMPRESSED, + + BLKDBG_WRITE_AIO, + BLKDBG_WRITE_COMPRESSED, + + BLKDBG_VMSTATE_LOAD, + BLKDBG_VMSTATE_SAVE, + + BLKDBG_COW_READ, + BLKDBG_COW_WRITE, + + BLKDBG_REFTABLE_LOAD, + BLKDBG_REFTABLE_GROW, + + BLKDBG_REFBLOCK_LOAD, + BLKDBG_REFBLOCK_UPDATE, + BLKDBG_REFBLOCK_UPDATE_PART, + BLKDBG_REFBLOCK_ALLOC, + BLKDBG_REFBLOCK_ALLOC_HOOKUP, + BLKDBG_REFBLOCK_ALLOC_WRITE, + BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS, + BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE, + BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE, + + BLKDBG_CLUSTER_ALLOC, + BLKDBG_CLUSTER_ALLOC_BYTES, + BLKDBG_CLUSTER_FREE, + + BLKDBG_FLUSH_TO_OS, + BLKDBG_FLUSH_TO_DISK, + + BLKDBG_EVENT_MAX, +} BlkDebugEvent; + +#define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt) +void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event); + +int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, + const char *tag); +int bdrv_debug_resume(BlockDriverState *bs, const char *tag); +bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); + +#endif diff --git a/contrib/qemu/include/block/block_int.h b/contrib/qemu/include/block/block_int.h new file mode 100644 index 00000000000..c6ac871e210 --- /dev/null +++ b/contrib/qemu/include/block/block_int.h @@ -0,0 +1,421 @@ +/* + * QEMU System Emulator block driver + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
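/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * wrapping a write request with the accounting helpers declared above so the
 * request shows up in "info blockstats". The surrounding I/O is elided.
 */
#include "block/block.h"

static void example_account_write(BlockDriverState *bs, int64_t bytes)
{
    BlockAcctCookie cookie;

    bdrv_acct_start(bs, &cookie, bytes, BDRV_ACCT_WRITE);
    /* ... issue and complete the actual write here ... */
    bdrv_acct_done(bs, &cookie);
}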
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCK_INT_H +#define BLOCK_INT_H + +#include "block/block.h" +#include "qemu/option.h" +#include "qemu/queue.h" +#include "block/coroutine.h" +#include "qemu/timer.h" +#include "qapi-types.h" +#include "qapi/qmp/qerror.h" +#include "monitor/monitor.h" +#include "qemu/hbitmap.h" +#include "block/snapshot.h" + +#define BLOCK_FLAG_ENCRYPT 1 +#define BLOCK_FLAG_COMPAT6 4 +#define BLOCK_FLAG_LAZY_REFCOUNTS 8 + +#define BLOCK_IO_LIMIT_READ 0 +#define BLOCK_IO_LIMIT_WRITE 1 +#define BLOCK_IO_LIMIT_TOTAL 2 + +#define BLOCK_IO_SLICE_TIME 100000000 +#define NANOSECONDS_PER_SECOND 1000000000.0 + +#define BLOCK_OPT_SIZE "size" +#define BLOCK_OPT_ENCRYPT "encryption" +#define BLOCK_OPT_COMPAT6 "compat6" +#define BLOCK_OPT_BACKING_FILE "backing_file" +#define BLOCK_OPT_BACKING_FMT "backing_fmt" +#define BLOCK_OPT_CLUSTER_SIZE "cluster_size" +#define BLOCK_OPT_TABLE_SIZE "table_size" +#define BLOCK_OPT_PREALLOC "preallocation" +#define BLOCK_OPT_SUBFMT "subformat" +#define BLOCK_OPT_COMPAT_LEVEL "compat" +#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts" +#define BLOCK_OPT_ADAPTER_TYPE "adapter_type" + +typedef struct BdrvTrackedRequest { + BlockDriverState *bs; + int64_t sector_num; + int nb_sectors; + bool is_write; + QLIST_ENTRY(BdrvTrackedRequest) list; + Coroutine *co; /* owner, used for deadlock detection */ + CoQueue wait_queue; /* coroutines blocked on this request */ +} BdrvTrackedRequest; + + +typedef struct BlockIOLimit { + int64_t bps[3]; + int64_t iops[3]; +} BlockIOLimit; + +typedef struct BlockIOBaseValue { + uint64_t bytes[2]; + uint64_t ios[2]; +} BlockIOBaseValue; + +struct BlockDriver { + const char *format_name; + int instance_size; + int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename); + int (*bdrv_probe_device)(const char *filename); + + /* Any driver implementing this callback is expected to be able to handle + * NULL file names in its .bdrv_open() implementation */ + void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp); + + /* For handling image reopen for split or non-split files */ + int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state, + BlockReopenQueue *queue, Error **errp); + void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state); + void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state); + + int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags); + int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags); + int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors); + int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors); + void (*bdrv_close)(BlockDriverState *bs); + void (*bdrv_rebind)(BlockDriverState *bs); + int (*bdrv_create)(const char *filename, QEMUOptionParameter *options); + int (*bdrv_set_key)(BlockDriverState *bs, const char *key); + int (*bdrv_make_empty)(BlockDriverState *bs); + /* aio */ + BlockDriverAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + 
BlockDriverCompletionFunc *cb, void *opaque); + BlockDriverAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + BlockDriverCompletionFunc *cb, void *opaque); + BlockDriverAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs, + BlockDriverCompletionFunc *cb, void *opaque); + BlockDriverAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + BlockDriverCompletionFunc *cb, void *opaque); + + int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); + int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); + /* + * Efficiently zero a region of the disk image. Typically an image format + * would use a compact metadata representation to implement this. This + * function pointer may be NULL and .bdrv_co_writev() will be called + * instead. + */ + int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors); + int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors); + int coroutine_fn (*bdrv_co_is_allocated)(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, int *pnum); + + /* + * Invalidate any cached meta-data. + */ + void (*bdrv_invalidate_cache)(BlockDriverState *bs); + + /* + * Flushes all data that was already written to the OS all the way down to + * the disk (for example raw-posix calls fsync()). + */ + int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs); + + /* + * Flushes all internal caches to the OS. The data may still sit in a + * writeback cache of the host OS, but it will survive a crash of the qemu + * process. + */ + int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs); + + const char *protocol_name; + int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset); + int64_t (*bdrv_getlength)(BlockDriverState *bs); + int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs); + int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors); + + int (*bdrv_snapshot_create)(BlockDriverState *bs, + QEMUSnapshotInfo *sn_info); + int (*bdrv_snapshot_goto)(BlockDriverState *bs, + const char *snapshot_id); + int (*bdrv_snapshot_delete)(BlockDriverState *bs, const char *snapshot_id); + int (*bdrv_snapshot_list)(BlockDriverState *bs, + QEMUSnapshotInfo **psn_info); + int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs, + const char *snapshot_name); + int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi); + + int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov, + int64_t pos); + int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf, + int64_t pos, int size); + + int (*bdrv_change_backing_file)(BlockDriverState *bs, + const char *backing_file, const char *backing_fmt); + + /* removable device specific */ + int (*bdrv_is_inserted)(BlockDriverState *bs); + int (*bdrv_media_changed)(BlockDriverState *bs); + void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag); + void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked); + + /* to control generic scsi devices */ + int (*bdrv_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf); + BlockDriverAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs, + unsigned long int req, void *buf, + BlockDriverCompletionFunc *cb, void *opaque); + + /* List of options for creating images, terminated by name == NULL */ + QEMUOptionParameter *create_options; + + + /* + * Returns 0 
for completed check, -errno for internal errors. + * The check results are stored in result. + */ + int (*bdrv_check)(BlockDriverState* bs, BdrvCheckResult *result, + BdrvCheckMode fix); + + void (*bdrv_debug_event)(BlockDriverState *bs, BlkDebugEvent event); + + /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */ + int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event, + const char *tag); + int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag); + bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag); + + /* + * Returns 1 if newly created images are guaranteed to contain only + * zeros, 0 otherwise. + */ + int (*bdrv_has_zero_init)(BlockDriverState *bs); + + QLIST_ENTRY(BlockDriver) list; +}; + +/* + * Note: the function bdrv_append() copies and swaps contents of + * BlockDriverStates, so if you add new fields to this struct, please + * inspect bdrv_append() to determine if the new fields need to be + * copied as well. + */ +struct BlockDriverState { + int64_t total_sectors; /* if we are reading a disk image, give its + size in sectors */ + int read_only; /* if true, the media is read only */ + int open_flags; /* flags used to open the file, re-used for re-open */ + int encrypted; /* if true, the media is encrypted */ + int valid_key; /* if true, a valid encryption key has been set */ + int sg; /* if true, the device is a /dev/sg* */ + int copy_on_read; /* if true, copy read backing sectors into image + note this is a reference count */ + + BlockDriver *drv; /* NULL means no media */ + void *opaque; + + void *dev; /* attached device model, if any */ + /* TODO change to DeviceState when all users are qdevified */ + const BlockDevOps *dev_ops; + void *dev_opaque; + + char filename[1024]; + char backing_file[1024]; /* if non zero, the image is a diff of + this file image */ + char backing_format[16]; /* if non-zero and backing_file exists */ + int is_temporary; + + BlockDriverState *backing_hd; + BlockDriverState *file; + + NotifierList close_notifiers; + + /* Callback before write request is processed */ + NotifierWithReturnList before_write_notifiers; + + /* number of in-flight copy-on-read requests */ + unsigned int copy_on_read_in_flight; + + /* the time for latest disk I/O */ + int64_t slice_start; + int64_t slice_end; + BlockIOLimit io_limits; + BlockIOBaseValue slice_submitted; + CoQueue throttled_reqs; + QEMUTimer *block_timer; + bool io_limits_enabled; + + /* I/O stats (display with "info blockstats"). */ + uint64_t nr_bytes[BDRV_MAX_IOTYPE]; + uint64_t nr_ops[BDRV_MAX_IOTYPE]; + uint64_t total_time_ns[BDRV_MAX_IOTYPE]; + uint64_t wr_highest_sector; + + /* Whether the disk can expand beyond total_sectors */ + int growable; + + /* the memory alignment required for the buffers handled by this driver */ + int buffer_alignment; + + /* do we need to tell the quest if we have a volatile write cache? */ + int enable_write_cache; + + /* NOTE: the following infos are only hints for real hardware + drivers. They are not used by the block driver */ + BlockdevOnError on_read_error, on_write_error; + bool iostatus_enabled; + BlockDeviceIoStatus iostatus; + char device_name[32]; + HBitmap *dirty_bitmap; + int in_use; /* users other than guest access, eg. 
block migration */ + QTAILQ_ENTRY(BlockDriverState) list; + + QLIST_HEAD(, BdrvTrackedRequest) tracked_requests; + + /* long-running background operation */ + BlockJob *job; + + QDict *options; +}; + +int get_tmp_filename(char *filename, int size); + +void bdrv_set_io_limits(BlockDriverState *bs, + BlockIOLimit *io_limits); + +/** + * bdrv_add_before_write_notifier: + * + * Register a callback that is invoked before write requests are processed but + * after any throttling or waiting for overlapping requests. + */ +void bdrv_add_before_write_notifier(BlockDriverState *bs, + NotifierWithReturn *notifier); + +/** + * bdrv_get_aio_context: + * + * Returns: the currently bound #AioContext + */ +AioContext *bdrv_get_aio_context(BlockDriverState *bs); + +#ifdef _WIN32 +int is_windows_drive(const char *filename); +#endif +void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv, + enum MonitorEvent ev, + BlockErrorAction action, bool is_read); + +/** + * stream_start: + * @bs: Block device to operate on. + * @base: Block device that will become the new base, or %NULL to + * flatten the whole backing file chain onto @bs. + * @base_id: The file name that will be written to @bs as the new + * backing file if the job completes. Ignored if @base is %NULL. + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @on_error: The action to take upon error. + * @cb: Completion function for the job. + * @opaque: Opaque pointer value passed to @cb. + * @errp: Error object. + * + * Start a streaming operation on @bs. Clusters that are unallocated + * in @bs, but allocated in any image between @base and @bs (both + * exclusive) will be written to @bs. At the end of a successful + * streaming job, the backing file of @bs will be changed to + * @base_id in the written image and to @base in the live BlockDriverState. + */ +void stream_start(BlockDriverState *bs, BlockDriverState *base, + const char *base_id, int64_t speed, BlockdevOnError on_error, + BlockDriverCompletionFunc *cb, + void *opaque, Error **errp); + +/** + * commit_start: + * @bs: Top Block device + * @base: Block device that will be written into, and become the new top + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @on_error: The action to take upon error. + * @cb: Completion function for the job. + * @opaque: Opaque pointer value passed to @cb. + * @errp: Error object. + * + */ +void commit_start(BlockDriverState *bs, BlockDriverState *base, + BlockDriverState *top, int64_t speed, + BlockdevOnError on_error, BlockDriverCompletionFunc *cb, + void *opaque, Error **errp); + +/* + * mirror_start: + * @bs: Block device to operate on. + * @target: Block device to write to. + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @granularity: The chosen granularity for the dirty bitmap. + * @buf_size: The amount of data that can be in flight at one time. + * @mode: Whether to collapse all images in the chain to the target. + * @on_source_error: The action to take upon error reading from the source. + * @on_target_error: The action to take upon error writing to the target. + * @cb: Completion function for the job. + * @opaque: Opaque pointer value passed to @cb. + * @errp: Error object. + * + * Start a mirroring operation on @bs. Clusters that are allocated + * in @bs will be written to @bs until the job is cancelled or + * manually completed. At the end of a successful mirroring job, + * @bs will be switched to read from @target. 
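/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * starting a block-stream job with stream_start() as documented above. The
 * callback is made up for the example, and BLOCKDEV_ON_ERROR_REPORT is
 * assumed to be the QAPI-generated constant for the "report" error policy.
 */
#include "block/block_int.h"

static void example_stream_cb(void *opaque, int ret)
{
    /* ret < 0 means the streaming job failed */
}

static void example_start_stream(BlockDriverState *bs, Error **errp)
{
    /* base=NULL: flatten the whole backing chain into bs; speed 0 = unlimited */
    stream_start(bs, NULL, NULL, 0, BLOCKDEV_ON_ERROR_REPORT,
                 example_stream_cb, NULL, errp);
}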
+ */ +void mirror_start(BlockDriverState *bs, BlockDriverState *target, + int64_t speed, int64_t granularity, int64_t buf_size, + MirrorSyncMode mode, BlockdevOnError on_source_error, + BlockdevOnError on_target_error, + BlockDriverCompletionFunc *cb, + void *opaque, Error **errp); + +/* + * backup_start: + * @bs: Block device to operate on. + * @target: Block device to write to. + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @on_source_error: The action to take upon error reading from the source. + * @on_target_error: The action to take upon error writing to the target. + * @cb: Completion function for the job. + * @opaque: Opaque pointer value passed to @cb. + * + * Start a backup operation on @bs. Clusters in @bs are written to @target + * until the job is cancelled or manually completed. + */ +void backup_start(BlockDriverState *bs, BlockDriverState *target, + int64_t speed, BlockdevOnError on_source_error, + BlockdevOnError on_target_error, + BlockDriverCompletionFunc *cb, void *opaque, + Error **errp); + +#endif /* BLOCK_INT_H */ diff --git a/contrib/qemu/include/block/blockjob.h b/contrib/qemu/include/block/blockjob.h new file mode 100644 index 00000000000..c290d07bba0 --- /dev/null +++ b/contrib/qemu/include/block/blockjob.h @@ -0,0 +1,278 @@ +/* + * Declarations for long-running block device operations + * + * Copyright (c) 2011 IBM Corp. + * Copyright (c) 2012 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCKJOB_H +#define BLOCKJOB_H 1 + +#include "block/block.h" + +/** + * BlockJobType: + * + * A class type for block job objects. + */ +typedef struct BlockJobType { + /** Derived BlockJob struct size */ + size_t instance_size; + + /** String describing the operation, part of query-block-jobs QMP API */ + const char *job_type; + + /** Optional callback for job types that support setting a speed limit */ + void (*set_speed)(BlockJob *job, int64_t speed, Error **errp); + + /** Optional callback for job types that need to forward I/O status reset */ + void (*iostatus_reset)(BlockJob *job); + + /** + * Optional callback for job types whose completion must be triggered + * manually. + */ + void (*complete)(BlockJob *job, Error **errp); +} BlockJobType; + +/** + * BlockJob: + * + * Long-running operation on a BlockDriverState. + */ +struct BlockJob { + /** The job type, including the job vtable. */ + const BlockJobType *job_type; + + /** The block device on which the job is operating. 
*/ + BlockDriverState *bs; + + /** + * The coroutine that executes the job. If not NULL, it is + * reentered when busy is false and the job is cancelled. + */ + Coroutine *co; + + /** + * Set to true if the job should cancel itself. The flag must + * always be tested just before toggling the busy flag from false + * to true. After a job has been cancelled, it should only yield + * if #qemu_aio_wait will ("sooner or later") reenter the coroutine. + */ + bool cancelled; + + /** + * Set to true if the job is either paused, or will pause itself + * as soon as possible (if busy == true). + */ + bool paused; + + /** + * Set to false by the job while it is in a quiescent state, where + * no I/O is pending and the job has yielded on any condition + * that is not detected by #qemu_aio_wait, such as a timer. + */ + bool busy; + + /** Status that is published by the query-block-jobs QMP API */ + BlockDeviceIoStatus iostatus; + + /** Offset that is published by the query-block-jobs QMP API */ + int64_t offset; + + /** Length that is published by the query-block-jobs QMP API */ + int64_t len; + + /** Speed that was set with @block_job_set_speed. */ + int64_t speed; + + /** The completion function that will be called when the job completes. */ + BlockDriverCompletionFunc *cb; + + /** The opaque value that is passed to the completion function. */ + void *opaque; +}; + +/** + * block_job_create: + * @job_type: The class object for the newly-created job. + * @bs: The block + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @cb: Completion function for the job. + * @opaque: Opaque pointer value passed to @cb. + * @errp: Error object. + * + * Create a new long-running block device job and return it. The job + * will call @cb asynchronously when the job completes. Note that + * @bs may have been closed at the time the @cb it is called. If + * this is the case, the job may be reported as either cancelled or + * completed. + * + * This function is not part of the public job interface; it should be + * called from a wrapper that is specific to the job type. + */ +void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs, + int64_t speed, BlockDriverCompletionFunc *cb, + void *opaque, Error **errp); + +/** + * block_job_sleep_ns: + * @job: The job that calls the function. + * @clock: The clock to sleep on. + * @ns: How many nanoseconds to stop for. + * + * Put the job to sleep (assuming that it wasn't canceled) for @ns + * nanoseconds. Canceling the job will interrupt the wait immediately. + */ +void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns); + +/** + * block_job_completed: + * @job: The job being completed. + * @ret: The status code. + * + * Call the completion function that was registered at creation time, and + * free @job. + */ +void block_job_completed(BlockJob *job, int ret); + +/** + * block_job_set_speed: + * @job: The job to set the speed for. + * @speed: The new value + * @errp: Error object. + * + * Set a rate-limiting parameter for the job; the actual meaning may + * vary depending on the job type. + */ +void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); + +/** + * block_job_cancel: + * @job: The job to be canceled. + * + * Asynchronously cancel the specified job. + */ +void block_job_cancel(BlockJob *job); + +/** + * block_job_complete: + * @job: The job to be completed. + * @errp: Error object. + * + * Asynchronously complete the specified job. 
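/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * management-side use of the BlockJob helpers declared above -- cap the speed
 * of a running job and then cancel it. Error handling via qapi/error.h is an
 * assumption about the surrounding code.
 */
#include "block/blockjob.h"
#include "qapi/error.h"

static void example_throttle_and_cancel(BlockJob *job)
{
    Error *local_err = NULL;

    block_job_set_speed(job, 10 * 1024 * 1024, &local_err);   /* 10 MiB/s cap */
    if (local_err) {
        error_free(local_err);   /* e.g. the job type has no set_speed callback */
        return;
    }
    block_job_cancel(job);       /* asynchronous; the job winds down on its own */
}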
+ */ +void block_job_complete(BlockJob *job, Error **errp); + +/** + * block_job_is_cancelled: + * @job: The job being queried. + * + * Returns whether the job is scheduled for cancellation. + */ +bool block_job_is_cancelled(BlockJob *job); + +/** + * block_job_query: + * @job: The job to get information about. + * + * Return information about a job. + */ +BlockJobInfo *block_job_query(BlockJob *job); + +/** + * block_job_pause: + * @job: The job to be paused. + * + * Asynchronously pause the specified job. + */ +void block_job_pause(BlockJob *job); + +/** + * block_job_resume: + * @job: The job to be resumed. + * + * Resume the specified job. + */ +void block_job_resume(BlockJob *job); + +/** + * qobject_from_block_job: + * @job: The job whose information is requested. + * + * Return a QDict corresponding to @job's query-block-jobs entry. + */ +QObject *qobject_from_block_job(BlockJob *job); + +/** + * block_job_ready: + * @job: The job which is now ready to complete. + * + * Send a BLOCK_JOB_READY event for the specified job. + */ +void block_job_ready(BlockJob *job); + +/** + * block_job_is_paused: + * @job: The job being queried. + * + * Returns whether the job is currently paused, or will pause + * as soon as it reaches a sleeping point. + */ +bool block_job_is_paused(BlockJob *job); + +/** + * block_job_cancel_sync: + * @job: The job to be canceled. + * + * Synchronously cancel the job. The completion callback is called + * before the function returns. The job may actually complete + * instead of canceling itself; the circumstances under which this + * happens depend on the kind of job that is active. + * + * Returns the return value from the job if the job actually completed + * during the call, or -ECANCELED if it was canceled. + */ +int block_job_cancel_sync(BlockJob *job); + +/** + * block_job_iostatus_reset: + * @job: The job whose I/O status should be reset. + * + * Reset I/O status on @job and on BlockDriverState objects it uses, + * other than job->bs. + */ +void block_job_iostatus_reset(BlockJob *job); + +/** + * block_job_error_action: + * @job: The job to signal an error for. + * @bs: The block device on which to set an I/O error. + * @on_err: The error action setting. + * @is_read: Whether the operation was a read. + * @error: The error that was reported. + * + * Report an I/O error for a block job and possibly stop the VM. Return the + * action that was selected based on @on_err and @error. + */ +BlockErrorAction block_job_error_action(BlockJob *job, BlockDriverState *bs, + BlockdevOnError on_err, + int is_read, int error); +#endif diff --git a/contrib/qemu/include/block/coroutine.h b/contrib/qemu/include/block/coroutine.h new file mode 100644 index 00000000000..377805a3b08 --- /dev/null +++ b/contrib/qemu/include/block/coroutine.h @@ -0,0 +1,218 @@ +/* + * QEMU coroutine implementation + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> + * Kevin Wolf <kwolf@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QEMU_COROUTINE_H +#define QEMU_COROUTINE_H + +#include <stdbool.h> +#include "qemu/queue.h" +#include "qemu/timer.h" + +/** + * Coroutines are a mechanism for stack switching and can be used for + * cooperative userspace threading. 
These functions provide a simple but + * useful flavor of coroutines that is suitable for writing sequential code, + * rather than callbacks, for operations that need to give up control while + * waiting for events to complete. + * + * These functions are re-entrant and may be used outside the global mutex. + */ + +/** + * Mark a function that executes in coroutine context + * + * Functions that execute in coroutine context cannot be called directly from + * normal functions. In the future it would be nice to enable compiler or + * static checker support for catching such errors. This annotation might make + * it possible and in the meantime it serves as documentation. + * + * For example: + * + * static void coroutine_fn foo(void) { + * .... + * } + */ +#define coroutine_fn + +typedef struct Coroutine Coroutine; + +/** + * Coroutine entry point + * + * When the coroutine is entered for the first time, opaque is passed in as an + * argument. + * + * When this function returns, the coroutine is destroyed automatically and + * execution continues in the caller who last entered the coroutine. + */ +typedef void coroutine_fn CoroutineEntry(void *opaque); + +/** + * Create a new coroutine + * + * Use qemu_coroutine_enter() to actually transfer control to the coroutine. + */ +Coroutine *qemu_coroutine_create(CoroutineEntry *entry); + +/** + * Transfer control to a coroutine + * + * The opaque argument is passed as the argument to the entry point when + * entering the coroutine for the first time. It is subsequently ignored. + */ +void qemu_coroutine_enter(Coroutine *coroutine, void *opaque); + +/** + * Transfer control back to a coroutine's caller + * + * This function does not return until the coroutine is re-entered using + * qemu_coroutine_enter(). + */ +void coroutine_fn qemu_coroutine_yield(void); + +/** + * Get the currently executing coroutine + */ +Coroutine *coroutine_fn qemu_coroutine_self(void); + +/** + * Return whether or not currently inside a coroutine + * + * This can be used to write functions that work both when in coroutine context + * and when not in coroutine context. Note that such functions cannot use the + * coroutine_fn annotation since they work outside coroutine context. + */ +bool qemu_in_coroutine(void); + + + +/** + * CoQueues are a mechanism to queue coroutines in order to continue executing + * them later. They provide the fundamental primitives on which coroutine locks + * are built. + */ +typedef struct CoQueue { + QTAILQ_HEAD(, Coroutine) entries; + AioContext *ctx; +} CoQueue; + +/** + * Initialise a CoQueue. This must be called before any other operation is used + * on the CoQueue. + */ +void qemu_co_queue_init(CoQueue *queue); + +/** + * Adds the current coroutine to the CoQueue and transfers control to the + * caller of the coroutine. + */ +void coroutine_fn qemu_co_queue_wait(CoQueue *queue); + +/** + * Adds the current coroutine to the head of the CoQueue and transfers control to the + * caller of the coroutine. + */ +void coroutine_fn qemu_co_queue_wait_insert_head(CoQueue *queue); + +/** + * Restarts the next coroutine in the CoQueue and removes it from the queue. + * + * Returns true if a coroutine was restarted, false if the queue is empty. + */ +bool qemu_co_queue_next(CoQueue *queue); + +/** + * Restarts all coroutines in the CoQueue and leaves the queue empty. + */ +void qemu_co_queue_restart_all(CoQueue *queue); + +/** + * Checks if the CoQueue is empty. 
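/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * creating and entering a coroutine with the API declared above. The
 * coroutine yields once and is destroyed automatically when its entry
 * function returns.
 */
#include "block/coroutine.h"

static void coroutine_fn example_co_entry(void *opaque)
{
    int *step = opaque;

    *step = 1;
    qemu_coroutine_yield();   /* transfer control back to the caller */
    *step = 2;                /* resumed by the second qemu_coroutine_enter() */
}

static void example_run_coroutine(void)
{
    int step = 0;
    Coroutine *co = qemu_coroutine_create(example_co_entry);

    qemu_coroutine_enter(co, &step);   /* runs until the yield; step == 1 */
    qemu_coroutine_enter(co, NULL);    /* resumes after the yield; step == 2 */
}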
+ */ +bool qemu_co_queue_empty(CoQueue *queue); + + +/** + * Provides a mutex that can be used to synchronise coroutines + */ +typedef struct CoMutex { + bool locked; + CoQueue queue; +} CoMutex; + +/** + * Initialises a CoMutex. This must be called before any other operation is used + * on the CoMutex. + */ +void qemu_co_mutex_init(CoMutex *mutex); + +/** + * Locks the mutex. If the lock cannot be taken immediately, control is + * transferred to the caller of the current coroutine. + */ +void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex); + +/** + * Unlocks the mutex and schedules the next coroutine that was waiting for this + * lock to be run. + */ +void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex); + +typedef struct CoRwlock { + bool writer; + int reader; + CoQueue queue; +} CoRwlock; + +/** + * Initialises a CoRwlock. This must be called before any other operation + * is used on the CoRwlock + */ +void qemu_co_rwlock_init(CoRwlock *lock); + +/** + * Read locks the CoRwlock. If the lock cannot be taken immediately because + * of a parallel writer, control is transferred to the caller of the current + * coroutine. + */ +void qemu_co_rwlock_rdlock(CoRwlock *lock); + +/** + * Write Locks the mutex. If the lock cannot be taken immediately because + * of a parallel reader, control is transferred to the caller of the current + * coroutine. + */ +void qemu_co_rwlock_wrlock(CoRwlock *lock); + +/** + * Unlocks the read/write lock and schedules the next coroutine that was + * waiting for this lock to be run. + */ +void qemu_co_rwlock_unlock(CoRwlock *lock); + +/** + * Yield the coroutine for a given duration + * + * Note this function uses timers and hence only works when a main loop is in + * use. See main-loop.h and do not use from qemu-tool programs. + */ +void coroutine_fn co_sleep_ns(QEMUClock *clock, int64_t ns); + +/** + * Yield until a file descriptor becomes readable + * + * Note that this function clobbers the handlers for the file descriptor. + */ +void coroutine_fn yield_until_fd_readable(int fd); +#endif /* QEMU_COROUTINE_H */ diff --git a/contrib/qemu/include/block/coroutine_int.h b/contrib/qemu/include/block/coroutine_int.h new file mode 100644 index 00000000000..f133d65af86 --- /dev/null +++ b/contrib/qemu/include/block/coroutine_int.h @@ -0,0 +1,53 @@ +/* + * Coroutine internals + * + * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
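/*
 * Illustrative usage sketch (editorial addition, not part of this diff):
 * protecting a shared counter with a CoMutex from coroutine context, using
 * the primitives declared above. The mutex is assumed to have been set up
 * once with qemu_co_mutex_init() during initialisation.
 */
#include "block/coroutine.h"

static CoMutex example_lock;

static void coroutine_fn example_locked_increment(int *shared_counter)
{
    qemu_co_mutex_lock(&example_lock);    /* may yield to other coroutines */
    (*shared_counter)++;
    qemu_co_mutex_unlock(&example_lock);  /* wakes the next waiter, if any */
}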
+ */ + +#ifndef QEMU_COROUTINE_INT_H +#define QEMU_COROUTINE_INT_H + +#include "qemu/queue.h" +#include "block/coroutine.h" + +typedef enum { + COROUTINE_YIELD = 1, + COROUTINE_TERMINATE = 2, +} CoroutineAction; + +struct Coroutine { + CoroutineEntry *entry; + void *entry_arg; + Coroutine *caller; + QSLIST_ENTRY(Coroutine) pool_next; + + /* Coroutines that should be woken up when we yield or terminate */ + QTAILQ_HEAD(, Coroutine) co_queue_wakeup; + QTAILQ_ENTRY(Coroutine) co_queue_next; +}; + +Coroutine *qemu_coroutine_new(void); +void qemu_coroutine_delete(Coroutine *co); +CoroutineAction qemu_coroutine_switch(Coroutine *from, Coroutine *to, + CoroutineAction action); +void coroutine_fn qemu_co_queue_run_restart(Coroutine *co); + +#endif diff --git a/contrib/qemu/include/block/snapshot.h b/contrib/qemu/include/block/snapshot.h new file mode 100644 index 00000000000..eaf61f0326e --- /dev/null +++ b/contrib/qemu/include/block/snapshot.h @@ -0,0 +1,53 @@ +/* + * Block layer snapshot related functions + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef SNAPSHOT_H +#define SNAPSHOT_H + +#include "qemu-common.h" + +typedef struct QEMUSnapshotInfo { + char id_str[128]; /* unique snapshot id */ + /* the following fields are informative. 
They are not needed for + the consistency of the snapshot */ + char name[256]; /* user chosen name */ + uint64_t vm_state_size; /* VM state info size */ + uint32_t date_sec; /* UTC date of the snapshot */ + uint32_t date_nsec; + uint64_t vm_clock_nsec; /* VM clock relative to boot */ +} QEMUSnapshotInfo; + +int bdrv_snapshot_find(BlockDriverState *bs, QEMUSnapshotInfo *sn_info, + const char *name); +int bdrv_can_snapshot(BlockDriverState *bs); +int bdrv_snapshot_create(BlockDriverState *bs, + QEMUSnapshotInfo *sn_info); +int bdrv_snapshot_goto(BlockDriverState *bs, + const char *snapshot_id); +int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id); +int bdrv_snapshot_list(BlockDriverState *bs, + QEMUSnapshotInfo **psn_info); +int bdrv_snapshot_load_tmp(BlockDriverState *bs, + const char *snapshot_name); +#endif diff --git a/contrib/qemu/include/config.h b/contrib/qemu/include/config.h new file mode 100644 index 00000000000..e20f78696a1 --- /dev/null +++ b/contrib/qemu/include/config.h @@ -0,0 +1,2 @@ +#include "config-host.h" +#include "config-target.h" diff --git a/contrib/qemu/include/exec/cpu-common.h b/contrib/qemu/include/exec/cpu-common.h new file mode 100644 index 00000000000..e4996e19c32 --- /dev/null +++ b/contrib/qemu/include/exec/cpu-common.h @@ -0,0 +1,124 @@ +#ifndef CPU_COMMON_H +#define CPU_COMMON_H 1 + +/* CPU interfaces that are target independent. */ + +#ifndef CONFIG_USER_ONLY +#include "exec/hwaddr.h" +#endif + +#ifndef NEED_CPU_H +#include "exec/poison.h" +#endif + +#include "qemu/bswap.h" +#include "qemu/queue.h" + +/** + * CPUListState: + * @cpu_fprintf: Print function. + * @file: File to print to using @cpu_fprint. + * + * State commonly used for iterating over CPU models. + */ +typedef struct CPUListState { + fprintf_function cpu_fprintf; + FILE *file; +} CPUListState; + +#if !defined(CONFIG_USER_ONLY) + +enum device_endian { + DEVICE_NATIVE_ENDIAN, + DEVICE_BIG_ENDIAN, + DEVICE_LITTLE_ENDIAN, +}; + +/* address in the RAM (different from a physical address) */ +#if defined(CONFIG_XEN_BACKEND) +typedef uint64_t ram_addr_t; +# define RAM_ADDR_MAX UINT64_MAX +# define RAM_ADDR_FMT "%" PRIx64 +#else +typedef uintptr_t ram_addr_t; +# define RAM_ADDR_MAX UINTPTR_MAX +# define RAM_ADDR_FMT "%" PRIxPTR +#endif + +/* memory API */ + +typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value); +typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr); + +void qemu_ram_remap(ram_addr_t addr, ram_addr_t length); +/* This should not be used by devices. */ +MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr); +void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev); + +void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, + int len, int is_write); +static inline void cpu_physical_memory_read(hwaddr addr, + void *buf, int len) +{ + cpu_physical_memory_rw(addr, buf, len, 0); +} +static inline void cpu_physical_memory_write(hwaddr addr, + const void *buf, int len) +{ + cpu_physical_memory_rw(addr, (void *)buf, len, 1); +} +void *cpu_physical_memory_map(hwaddr addr, + hwaddr *plen, + int is_write); +void cpu_physical_memory_unmap(void *buffer, hwaddr len, + int is_write, hwaddr access_len); +void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)); + +bool cpu_physical_memory_is_io(hwaddr phys_addr); + +/* Coalesced MMIO regions are areas where write operations can be reordered. + * This usually implies that write operations are side-effect free. 
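A short sketch of the guest-physical-memory helpers declared above (editor's illustration, only meaningful in system-emulation code; copy_guest_word is a hypothetical name):

#include "exec/cpu-common.h"

static void copy_guest_word(hwaddr src, hwaddr dst)
{
    uint32_t word;

    /* Pull four bytes of guest-physical memory into a host buffer... */
    cpu_physical_memory_read(src, &word, sizeof(word));

    /* ...and store them back at another guest-physical address. */
    cpu_physical_memory_write(dst, &word, sizeof(word));
}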
This allows + * batching which can make a major impact on performance when using + * virtualization. + */ +void qemu_flush_coalesced_mmio_buffer(void); + +uint32_t ldub_phys(hwaddr addr); +uint32_t lduw_le_phys(hwaddr addr); +uint32_t lduw_be_phys(hwaddr addr); +uint32_t ldl_le_phys(hwaddr addr); +uint32_t ldl_be_phys(hwaddr addr); +uint64_t ldq_le_phys(hwaddr addr); +uint64_t ldq_be_phys(hwaddr addr); +void stb_phys(hwaddr addr, uint32_t val); +void stw_le_phys(hwaddr addr, uint32_t val); +void stw_be_phys(hwaddr addr, uint32_t val); +void stl_le_phys(hwaddr addr, uint32_t val); +void stl_be_phys(hwaddr addr, uint32_t val); +void stq_le_phys(hwaddr addr, uint64_t val); +void stq_be_phys(hwaddr addr, uint64_t val); + +#ifdef NEED_CPU_H +uint32_t lduw_phys(hwaddr addr); +uint32_t ldl_phys(hwaddr addr); +uint64_t ldq_phys(hwaddr addr); +void stl_phys_notdirty(hwaddr addr, uint32_t val); +void stw_phys(hwaddr addr, uint32_t val); +void stl_phys(hwaddr addr, uint32_t val); +void stq_phys(hwaddr addr, uint64_t val); +#endif + +void cpu_physical_memory_write_rom(hwaddr addr, + const uint8_t *buf, int len); + +extern struct MemoryRegion io_mem_rom; +extern struct MemoryRegion io_mem_notdirty; + +typedef void (RAMBlockIterFunc)(void *host_addr, + ram_addr_t offset, ram_addr_t length, void *opaque); + +void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque); + +#endif + +#endif /* !CPU_COMMON_H */ diff --git a/contrib/qemu/include/exec/hwaddr.h b/contrib/qemu/include/exec/hwaddr.h new file mode 100644 index 00000000000..c9eb78fba18 --- /dev/null +++ b/contrib/qemu/include/exec/hwaddr.h @@ -0,0 +1,20 @@ +/* Define hwaddr if it exists. */ + +#ifndef HWADDR_H +#define HWADDR_H + +#define HWADDR_BITS 64 +/* hwaddr is the type of a physical address (its size can + be different from 'target_ulong'). */ + +typedef uint64_t hwaddr; +#define HWADDR_MAX UINT64_MAX +#define TARGET_FMT_plx "%016" PRIx64 +#define HWADDR_PRId PRId64 +#define HWADDR_PRIi PRIi64 +#define HWADDR_PRIo PRIo64 +#define HWADDR_PRIu PRIu64 +#define HWADDR_PRIx PRIx64 +#define HWADDR_PRIX PRIX64 + +#endif diff --git a/contrib/qemu/include/exec/poison.h b/contrib/qemu/include/exec/poison.h new file mode 100644 index 00000000000..2341a750413 --- /dev/null +++ b/contrib/qemu/include/exec/poison.h @@ -0,0 +1,63 @@ +/* Poison identifiers that should not be used when building + target independent device code. 
*/ + +#ifndef HW_POISON_H +#define HW_POISON_H +#ifdef __GNUC__ + +#pragma GCC poison TARGET_I386 +#pragma GCC poison TARGET_X86_64 +#pragma GCC poison TARGET_ALPHA +#pragma GCC poison TARGET_ARM +#pragma GCC poison TARGET_CRIS +#pragma GCC poison TARGET_LM32 +#pragma GCC poison TARGET_M68K +#pragma GCC poison TARGET_MIPS +#pragma GCC poison TARGET_MIPS64 +#pragma GCC poison TARGET_OPENRISC +#pragma GCC poison TARGET_PPC +#pragma GCC poison TARGET_PPCEMB +#pragma GCC poison TARGET_PPC64 +#pragma GCC poison TARGET_ABI32 +#pragma GCC poison TARGET_SH4 +#pragma GCC poison TARGET_SPARC +#pragma GCC poison TARGET_SPARC64 + +#pragma GCC poison TARGET_WORDS_BIGENDIAN +#pragma GCC poison BSWAP_NEEDED + +#pragma GCC poison TARGET_LONG_BITS +#pragma GCC poison TARGET_FMT_lx +#pragma GCC poison TARGET_FMT_ld + +#pragma GCC poison TARGET_PAGE_SIZE +#pragma GCC poison TARGET_PAGE_MASK +#pragma GCC poison TARGET_PAGE_BITS +#pragma GCC poison TARGET_PAGE_ALIGN + +#pragma GCC poison CPUArchState +#pragma GCC poison env + +#pragma GCC poison lduw_phys +#pragma GCC poison ldl_phys +#pragma GCC poison ldq_phys +#pragma GCC poison stl_phys_notdirty +#pragma GCC poison stw_phys +#pragma GCC poison stl_phys +#pragma GCC poison stq_phys + +#pragma GCC poison CPU_INTERRUPT_HARD +#pragma GCC poison CPU_INTERRUPT_EXITTB +#pragma GCC poison CPU_INTERRUPT_HALT +#pragma GCC poison CPU_INTERRUPT_DEBUG +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_2 + +#endif +#endif diff --git a/contrib/qemu/include/fpu/softfloat.h b/contrib/qemu/include/fpu/softfloat.h new file mode 100644 index 00000000000..f3927e2419f --- /dev/null +++ b/contrib/qemu/include/fpu/softfloat.h @@ -0,0 +1,641 @@ +/* + * QEMU float support + * + * Derived from SoftFloat. + */ + +/*============================================================================ + +This C header file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic +Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. 
+ +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. + +=============================================================================*/ + +#ifndef SOFTFLOAT_H +#define SOFTFLOAT_H + +#if defined(CONFIG_SOLARIS) && defined(CONFIG_NEEDS_LIBSUNMATH) +#include <sunmath.h> +#endif + +#include <inttypes.h> +#include "config-host.h" +#include "qemu/osdep.h" + +/*---------------------------------------------------------------------------- +| Each of the following `typedef's defines the most convenient type that holds +| integers of at least as many bits as specified. For example, `uint8' should +| be the most convenient type that can hold unsigned integers of as many as +| 8 bits. The `flag' type must be able to hold either a 0 or 1. For most +| implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed +| to the same as `int'. +*----------------------------------------------------------------------------*/ +typedef uint8_t flag; +typedef uint8_t uint8; +typedef int8_t int8; +typedef unsigned int uint32; +typedef signed int int32; +typedef uint64_t uint64; +typedef int64_t int64; + +#define LIT64( a ) a##LL +#define INLINE static inline + +#define STATUS_PARAM , float_status *status +#define STATUS(field) status->field +#define STATUS_VAR , status + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point ordering relations +*----------------------------------------------------------------------------*/ +enum { + float_relation_less = -1, + float_relation_equal = 0, + float_relation_greater = 1, + float_relation_unordered = 2 +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point types. +*----------------------------------------------------------------------------*/ +/* Use structures for soft-float types. This prevents accidentally mixing + them with native int/float types. A sufficiently clever compiler and + sane ABI should be able to see though these structs. However + x86/gcc 3.x seems to struggle a bit, so leave them disabled by default. */ +//#define USE_SOFTFLOAT_STRUCT_TYPES +#ifdef USE_SOFTFLOAT_STRUCT_TYPES +typedef struct { + uint16_t v; +} float16; +#define float16_val(x) (((float16)(x)).v) +#define make_float16(x) __extension__ ({ float16 f16_val = {x}; f16_val; }) +#define const_float16(x) { x } +typedef struct { + uint32_t v; +} float32; +/* The cast ensures an error if the wrong type is passed. 
*/ +#define float32_val(x) (((float32)(x)).v) +#define make_float32(x) __extension__ ({ float32 f32_val = {x}; f32_val; }) +#define const_float32(x) { x } +typedef struct { + uint64_t v; +} float64; +#define float64_val(x) (((float64)(x)).v) +#define make_float64(x) __extension__ ({ float64 f64_val = {x}; f64_val; }) +#define const_float64(x) { x } +#else +typedef uint16_t float16; +typedef uint32_t float32; +typedef uint64_t float64; +#define float16_val(x) (x) +#define float32_val(x) (x) +#define float64_val(x) (x) +#define make_float16(x) (x) +#define make_float32(x) (x) +#define make_float64(x) (x) +#define const_float16(x) (x) +#define const_float32(x) (x) +#define const_float64(x) (x) +#endif +typedef struct { + uint64_t low; + uint16_t high; +} floatx80; +#define make_floatx80(exp, mant) ((floatx80) { mant, exp }) +#define make_floatx80_init(exp, mant) { .low = mant, .high = exp } +typedef struct { +#ifdef HOST_WORDS_BIGENDIAN + uint64_t high, low; +#else + uint64_t low, high; +#endif +} float128; +#define make_float128(high_, low_) ((float128) { .high = high_, .low = low_ }) +#define make_float128_init(high_, low_) { .high = high_, .low = low_ } + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point underflow tininess-detection mode. +*----------------------------------------------------------------------------*/ +enum { + float_tininess_after_rounding = 0, + float_tininess_before_rounding = 1 +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point rounding mode. +*----------------------------------------------------------------------------*/ +enum { + float_round_nearest_even = 0, + float_round_down = 1, + float_round_up = 2, + float_round_to_zero = 3 +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point exception flags. +*----------------------------------------------------------------------------*/ +enum { + float_flag_invalid = 1, + float_flag_divbyzero = 4, + float_flag_overflow = 8, + float_flag_underflow = 16, + float_flag_inexact = 32, + float_flag_input_denormal = 64, + float_flag_output_denormal = 128 +}; + +typedef struct float_status { + signed char float_detect_tininess; + signed char float_rounding_mode; + signed char float_exception_flags; + signed char floatx80_rounding_precision; + /* should denormalised results go to zero and set the inexact flag? */ + flag flush_to_zero; + /* should denormalised inputs go to zero and set the input_denormal flag? */ + flag flush_inputs_to_zero; + flag default_nan_mode; +} float_status; + +void set_float_rounding_mode(int val STATUS_PARAM); +void set_float_exception_flags(int val STATUS_PARAM); +INLINE void set_float_detect_tininess(int val STATUS_PARAM) +{ + STATUS(float_detect_tininess) = val; +} +INLINE void set_flush_to_zero(flag val STATUS_PARAM) +{ + STATUS(flush_to_zero) = val; +} +INLINE void set_flush_inputs_to_zero(flag val STATUS_PARAM) +{ + STATUS(flush_inputs_to_zero) = val; +} +INLINE void set_default_nan_mode(flag val STATUS_PARAM) +{ + STATUS(default_nan_mode) = val; +} +INLINE int get_float_exception_flags(float_status *status) +{ + return STATUS(float_exception_flags); +} +void set_floatx80_rounding_precision(int val STATUS_PARAM); + +/*---------------------------------------------------------------------------- +| Routine to raise any or all of the software IEC/IEEE floating-point +| exception flags. 
+*----------------------------------------------------------------------------*/ +void float_raise( int8 flags STATUS_PARAM); + +/*---------------------------------------------------------------------------- +| Options to indicate which negations to perform in float*_muladd() +| Using these differs from negating an input or output before calling +| the muladd function in that this means that a NaN doesn't have its +| sign bit inverted before it is propagated. +*----------------------------------------------------------------------------*/ +enum { + float_muladd_negate_c = 1, + float_muladd_negate_product = 2, + float_muladd_negate_result = 4, +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE integer-to-floating-point conversion routines. +*----------------------------------------------------------------------------*/ +float32 int32_to_float32( int32 STATUS_PARAM ); +float64 int32_to_float64( int32 STATUS_PARAM ); +float32 uint32_to_float32( uint32 STATUS_PARAM ); +float64 uint32_to_float64( uint32 STATUS_PARAM ); +floatx80 int32_to_floatx80( int32 STATUS_PARAM ); +float128 int32_to_float128( int32 STATUS_PARAM ); +float32 int64_to_float32( int64 STATUS_PARAM ); +float32 uint64_to_float32( uint64 STATUS_PARAM ); +float64 int64_to_float64( int64 STATUS_PARAM ); +float64 uint64_to_float64( uint64 STATUS_PARAM ); +floatx80 int64_to_floatx80( int64 STATUS_PARAM ); +float128 int64_to_float128( int64 STATUS_PARAM ); +float128 uint64_to_float128( uint64 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software half-precision conversion routines. +*----------------------------------------------------------------------------*/ +float16 float32_to_float16( float32, flag STATUS_PARAM ); +float32 float16_to_float32( float16, flag STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software half-precision operations. +*----------------------------------------------------------------------------*/ +int float16_is_quiet_nan( float16 ); +int float16_is_signaling_nan( float16 ); +float16 float16_maybe_silence_nan( float16 ); + +INLINE int float16_is_any_nan(float16 a) +{ + return ((float16_val(a) & ~0x8000) > 0x7c00); +} + +/*---------------------------------------------------------------------------- +| The pattern for a default generated half-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float16 float16_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE single-precision conversion routines. 
+*----------------------------------------------------------------------------*/ +int_fast16_t float32_to_int16_round_to_zero(float32 STATUS_PARAM); +uint_fast16_t float32_to_uint16_round_to_zero(float32 STATUS_PARAM); +int32 float32_to_int32( float32 STATUS_PARAM ); +int32 float32_to_int32_round_to_zero( float32 STATUS_PARAM ); +uint32 float32_to_uint32( float32 STATUS_PARAM ); +uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM ); +int64 float32_to_int64( float32 STATUS_PARAM ); +int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM ); +float64 float32_to_float64( float32 STATUS_PARAM ); +floatx80 float32_to_floatx80( float32 STATUS_PARAM ); +float128 float32_to_float128( float32 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE single-precision operations. +*----------------------------------------------------------------------------*/ +float32 float32_round_to_int( float32 STATUS_PARAM ); +float32 float32_add( float32, float32 STATUS_PARAM ); +float32 float32_sub( float32, float32 STATUS_PARAM ); +float32 float32_mul( float32, float32 STATUS_PARAM ); +float32 float32_div( float32, float32 STATUS_PARAM ); +float32 float32_rem( float32, float32 STATUS_PARAM ); +float32 float32_muladd(float32, float32, float32, int STATUS_PARAM); +float32 float32_sqrt( float32 STATUS_PARAM ); +float32 float32_exp2( float32 STATUS_PARAM ); +float32 float32_log2( float32 STATUS_PARAM ); +int float32_eq( float32, float32 STATUS_PARAM ); +int float32_le( float32, float32 STATUS_PARAM ); +int float32_lt( float32, float32 STATUS_PARAM ); +int float32_unordered( float32, float32 STATUS_PARAM ); +int float32_eq_quiet( float32, float32 STATUS_PARAM ); +int float32_le_quiet( float32, float32 STATUS_PARAM ); +int float32_lt_quiet( float32, float32 STATUS_PARAM ); +int float32_unordered_quiet( float32, float32 STATUS_PARAM ); +int float32_compare( float32, float32 STATUS_PARAM ); +int float32_compare_quiet( float32, float32 STATUS_PARAM ); +float32 float32_min(float32, float32 STATUS_PARAM); +float32 float32_max(float32, float32 STATUS_PARAM); +int float32_is_quiet_nan( float32 ); +int float32_is_signaling_nan( float32 ); +float32 float32_maybe_silence_nan( float32 ); +float32 float32_scalbn( float32, int STATUS_PARAM ); + +INLINE float32 float32_abs(float32 a) +{ + /* Note that abs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float32(float32_val(a) & 0x7fffffff); +} + +INLINE float32 float32_chs(float32 a) +{ + /* Note that chs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. 
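A hedged sketch of how the single-precision operations above are driven through a float_status (editor's illustration, not part of the header; the hex constants encode 1.0f and 2.0f):

#include "fpu/softfloat.h"

static uint32_t add_one_and_two(void)
{
    float_status st = { 0 };
    float32 one = make_float32(0x3f800000);    /* 1.0f */
    float32 two = make_float32(0x40000000);    /* 2.0f */
    float32 sum;

    set_float_rounding_mode(float_round_nearest_even, &st);

    /* The trailing argument is the float_status * added by STATUS_PARAM. */
    sum = float32_add(one, two, &st);

    if (get_float_exception_flags(&st) & float_flag_inexact) {
        /* the result had to be rounded */
    }
    return float32_val(sum);                   /* 0x40400000, i.e. 3.0f */
}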
+ */ + return make_float32(float32_val(a) ^ 0x80000000); +} + +INLINE int float32_is_infinity(float32 a) +{ + return (float32_val(a) & 0x7fffffff) == 0x7f800000; +} + +INLINE int float32_is_neg(float32 a) +{ + return float32_val(a) >> 31; +} + +INLINE int float32_is_zero(float32 a) +{ + return (float32_val(a) & 0x7fffffff) == 0; +} + +INLINE int float32_is_any_nan(float32 a) +{ + return ((float32_val(a) & ~(1 << 31)) > 0x7f800000UL); +} + +INLINE int float32_is_zero_or_denormal(float32 a) +{ + return (float32_val(a) & 0x7f800000) == 0; +} + +INLINE float32 float32_set_sign(float32 a, int sign) +{ + return make_float32((float32_val(a) & 0x7fffffff) | (sign << 31)); +} + +#define float32_zero make_float32(0) +#define float32_one make_float32(0x3f800000) +#define float32_ln2 make_float32(0x3f317218) +#define float32_pi make_float32(0x40490fdb) +#define float32_half make_float32(0x3f000000) +#define float32_infinity make_float32(0x7f800000) + + +/*---------------------------------------------------------------------------- +| The pattern for a default generated single-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float32 float32_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE double-precision conversion routines. +*----------------------------------------------------------------------------*/ +int_fast16_t float64_to_int16_round_to_zero(float64 STATUS_PARAM); +uint_fast16_t float64_to_uint16_round_to_zero(float64 STATUS_PARAM); +int32 float64_to_int32( float64 STATUS_PARAM ); +int32 float64_to_int32_round_to_zero( float64 STATUS_PARAM ); +uint32 float64_to_uint32( float64 STATUS_PARAM ); +uint32 float64_to_uint32_round_to_zero( float64 STATUS_PARAM ); +int64 float64_to_int64( float64 STATUS_PARAM ); +int64 float64_to_int64_round_to_zero( float64 STATUS_PARAM ); +uint64 float64_to_uint64 (float64 a STATUS_PARAM); +uint64 float64_to_uint64_round_to_zero (float64 a STATUS_PARAM); +float32 float64_to_float32( float64 STATUS_PARAM ); +floatx80 float64_to_floatx80( float64 STATUS_PARAM ); +float128 float64_to_float128( float64 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE double-precision operations. 
+*----------------------------------------------------------------------------*/ +float64 float64_round_to_int( float64 STATUS_PARAM ); +float64 float64_trunc_to_int( float64 STATUS_PARAM ); +float64 float64_add( float64, float64 STATUS_PARAM ); +float64 float64_sub( float64, float64 STATUS_PARAM ); +float64 float64_mul( float64, float64 STATUS_PARAM ); +float64 float64_div( float64, float64 STATUS_PARAM ); +float64 float64_rem( float64, float64 STATUS_PARAM ); +float64 float64_muladd(float64, float64, float64, int STATUS_PARAM); +float64 float64_sqrt( float64 STATUS_PARAM ); +float64 float64_log2( float64 STATUS_PARAM ); +int float64_eq( float64, float64 STATUS_PARAM ); +int float64_le( float64, float64 STATUS_PARAM ); +int float64_lt( float64, float64 STATUS_PARAM ); +int float64_unordered( float64, float64 STATUS_PARAM ); +int float64_eq_quiet( float64, float64 STATUS_PARAM ); +int float64_le_quiet( float64, float64 STATUS_PARAM ); +int float64_lt_quiet( float64, float64 STATUS_PARAM ); +int float64_unordered_quiet( float64, float64 STATUS_PARAM ); +int float64_compare( float64, float64 STATUS_PARAM ); +int float64_compare_quiet( float64, float64 STATUS_PARAM ); +float64 float64_min(float64, float64 STATUS_PARAM); +float64 float64_max(float64, float64 STATUS_PARAM); +int float64_is_quiet_nan( float64 a ); +int float64_is_signaling_nan( float64 ); +float64 float64_maybe_silence_nan( float64 ); +float64 float64_scalbn( float64, int STATUS_PARAM ); + +INLINE float64 float64_abs(float64 a) +{ + /* Note that abs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float64(float64_val(a) & 0x7fffffffffffffffLL); +} + +INLINE float64 float64_chs(float64 a) +{ + /* Note that chs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float64(float64_val(a) ^ 0x8000000000000000LL); +} + +INLINE int float64_is_infinity(float64 a) +{ + return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL; +} + +INLINE int float64_is_neg(float64 a) +{ + return float64_val(a) >> 63; +} + +INLINE int float64_is_zero(float64 a) +{ + return (float64_val(a) & 0x7fffffffffffffffLL) == 0; +} + +INLINE int float64_is_any_nan(float64 a) +{ + return ((float64_val(a) & ~(1ULL << 63)) > 0x7ff0000000000000ULL); +} + +INLINE int float64_is_zero_or_denormal(float64 a) +{ + return (float64_val(a) & 0x7ff0000000000000LL) == 0; +} + +INLINE float64 float64_set_sign(float64 a, int sign) +{ + return make_float64((float64_val(a) & 0x7fffffffffffffffULL) + | ((int64_t)sign << 63)); +} + +#define float64_zero make_float64(0) +#define float64_one make_float64(0x3ff0000000000000LL) +#define float64_ln2 make_float64(0x3fe62e42fefa39efLL) +#define float64_pi make_float64(0x400921fb54442d18LL) +#define float64_half make_float64(0x3fe0000000000000LL) +#define float64_infinity make_float64(0x7ff0000000000000LL) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated double-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float64 float64_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE extended double-precision conversion routines. 
+*----------------------------------------------------------------------------*/ +int32 floatx80_to_int32( floatx80 STATUS_PARAM ); +int32 floatx80_to_int32_round_to_zero( floatx80 STATUS_PARAM ); +int64 floatx80_to_int64( floatx80 STATUS_PARAM ); +int64 floatx80_to_int64_round_to_zero( floatx80 STATUS_PARAM ); +float32 floatx80_to_float32( floatx80 STATUS_PARAM ); +float64 floatx80_to_float64( floatx80 STATUS_PARAM ); +float128 floatx80_to_float128( floatx80 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE extended double-precision operations. +*----------------------------------------------------------------------------*/ +floatx80 floatx80_round_to_int( floatx80 STATUS_PARAM ); +floatx80 floatx80_add( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_sub( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_mul( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_div( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_rem( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_sqrt( floatx80 STATUS_PARAM ); +int floatx80_eq( floatx80, floatx80 STATUS_PARAM ); +int floatx80_le( floatx80, floatx80 STATUS_PARAM ); +int floatx80_lt( floatx80, floatx80 STATUS_PARAM ); +int floatx80_unordered( floatx80, floatx80 STATUS_PARAM ); +int floatx80_eq_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_le_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_lt_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_unordered_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_compare( floatx80, floatx80 STATUS_PARAM ); +int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_is_quiet_nan( floatx80 ); +int floatx80_is_signaling_nan( floatx80 ); +floatx80 floatx80_maybe_silence_nan( floatx80 ); +floatx80 floatx80_scalbn( floatx80, int STATUS_PARAM ); + +INLINE floatx80 floatx80_abs(floatx80 a) +{ + a.high &= 0x7fff; + return a; +} + +INLINE floatx80 floatx80_chs(floatx80 a) +{ + a.high ^= 0x8000; + return a; +} + +INLINE int floatx80_is_infinity(floatx80 a) +{ + return (a.high & 0x7fff) == 0x7fff && a.low == 0x8000000000000000LL; +} + +INLINE int floatx80_is_neg(floatx80 a) +{ + return a.high >> 15; +} + +INLINE int floatx80_is_zero(floatx80 a) +{ + return (a.high & 0x7fff) == 0 && a.low == 0; +} + +INLINE int floatx80_is_zero_or_denormal(floatx80 a) +{ + return (a.high & 0x7fff) == 0; +} + +INLINE int floatx80_is_any_nan(floatx80 a) +{ + return ((a.high & 0x7fff) == 0x7fff) && (a.low<<1); +} + +#define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL) +#define floatx80_one make_floatx80(0x3fff, 0x8000000000000000LL) +#define floatx80_ln2 make_floatx80(0x3ffe, 0xb17217f7d1cf79acLL) +#define floatx80_pi make_floatx80(0x4000, 0xc90fdaa22168c235LL) +#define floatx80_half make_floatx80(0x3ffe, 0x8000000000000000LL) +#define floatx80_infinity make_floatx80(0x7fff, 0x8000000000000000LL) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated extended double-precision NaN. +*----------------------------------------------------------------------------*/ +extern const floatx80 floatx80_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE quadruple-precision conversion routines. 
+*----------------------------------------------------------------------------*/ +int32 float128_to_int32( float128 STATUS_PARAM ); +int32 float128_to_int32_round_to_zero( float128 STATUS_PARAM ); +int64 float128_to_int64( float128 STATUS_PARAM ); +int64 float128_to_int64_round_to_zero( float128 STATUS_PARAM ); +float32 float128_to_float32( float128 STATUS_PARAM ); +float64 float128_to_float64( float128 STATUS_PARAM ); +floatx80 float128_to_floatx80( float128 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE quadruple-precision operations. +*----------------------------------------------------------------------------*/ +float128 float128_round_to_int( float128 STATUS_PARAM ); +float128 float128_add( float128, float128 STATUS_PARAM ); +float128 float128_sub( float128, float128 STATUS_PARAM ); +float128 float128_mul( float128, float128 STATUS_PARAM ); +float128 float128_div( float128, float128 STATUS_PARAM ); +float128 float128_rem( float128, float128 STATUS_PARAM ); +float128 float128_sqrt( float128 STATUS_PARAM ); +int float128_eq( float128, float128 STATUS_PARAM ); +int float128_le( float128, float128 STATUS_PARAM ); +int float128_lt( float128, float128 STATUS_PARAM ); +int float128_unordered( float128, float128 STATUS_PARAM ); +int float128_eq_quiet( float128, float128 STATUS_PARAM ); +int float128_le_quiet( float128, float128 STATUS_PARAM ); +int float128_lt_quiet( float128, float128 STATUS_PARAM ); +int float128_unordered_quiet( float128, float128 STATUS_PARAM ); +int float128_compare( float128, float128 STATUS_PARAM ); +int float128_compare_quiet( float128, float128 STATUS_PARAM ); +int float128_is_quiet_nan( float128 ); +int float128_is_signaling_nan( float128 ); +float128 float128_maybe_silence_nan( float128 ); +float128 float128_scalbn( float128, int STATUS_PARAM ); + +INLINE float128 float128_abs(float128 a) +{ + a.high &= 0x7fffffffffffffffLL; + return a; +} + +INLINE float128 float128_chs(float128 a) +{ + a.high ^= 0x8000000000000000LL; + return a; +} + +INLINE int float128_is_infinity(float128 a) +{ + return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0; +} + +INLINE int float128_is_neg(float128 a) +{ + return a.high >> 63; +} + +INLINE int float128_is_zero(float128 a) +{ + return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0; +} + +INLINE int float128_is_zero_or_denormal(float128 a) +{ + return (a.high & 0x7fff000000000000LL) == 0; +} + +INLINE int float128_is_any_nan(float128 a) +{ + return ((a.high >> 48) & 0x7fff) == 0x7fff && + ((a.low != 0) || ((a.high & 0xffffffffffffLL) != 0)); +} + +#define float128_zero make_float128(0, 0) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated quadruple-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float128 float128_default_nan; + +#endif /* !SOFTFLOAT_H */ diff --git a/contrib/qemu/include/glib-compat.h b/contrib/qemu/include/glib-compat.h new file mode 100644 index 00000000000..8aa77afd626 --- /dev/null +++ b/contrib/qemu/include/glib-compat.h @@ -0,0 +1,27 @@ +/* + * GLIB Compatibility Functions + * + * Copyright IBM, Corp. 2013 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_GLIB_COMPAT_H +#define QEMU_GLIB_COMPAT_H + +#include <glib.h> + +#if !GLIB_CHECK_VERSION(2, 14, 0) +static inline guint g_timeout_add_seconds(guint interval, GSourceFunc function, + gpointer data) +{ + return g_timeout_add(interval * 1000, function, data); +} +#endif + +#endif diff --git a/contrib/qemu/include/migration/migration.h b/contrib/qemu/include/migration/migration.h new file mode 100644 index 00000000000..bc9fde0b2ab --- /dev/null +++ b/contrib/qemu/include/migration/migration.h @@ -0,0 +1,157 @@ +/* + * QEMU live migration + * + * Copyright IBM, Corp. 2008 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_MIGRATION_H +#define QEMU_MIGRATION_H + +#include "qapi/qmp/qdict.h" +#include "qemu-common.h" +#include "qemu/thread.h" +#include "qemu/notify.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "qapi-types.h" +#include "exec/cpu-common.h" + +struct MigrationParams { + bool blk; + bool shared; +}; + +typedef struct MigrationState MigrationState; + +struct MigrationState +{ + int64_t bandwidth_limit; + size_t bytes_xfer; + size_t xfer_limit; + QemuThread thread; + QEMUBH *cleanup_bh; + QEMUFile *file; + + int state; + MigrationParams params; + double mbps; + int64_t total_time; + int64_t downtime; + int64_t expected_downtime; + int64_t dirty_pages_rate; + int64_t dirty_bytes_rate; + bool enabled_capabilities[MIGRATION_CAPABILITY_MAX]; + int64_t xbzrle_cache_size; +}; + +void process_incoming_migration(QEMUFile *f); + +void qemu_start_incoming_migration(const char *uri, Error **errp); + +uint64_t migrate_max_downtime(void); + +void do_info_migrate_print(Monitor *mon, const QObject *data); + +void do_info_migrate(Monitor *mon, QObject **ret_data); + +void exec_start_incoming_migration(const char *host_port, Error **errp); + +void exec_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp); + +void tcp_start_incoming_migration(const char *host_port, Error **errp); + +void tcp_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp); + +void unix_start_incoming_migration(const char *path, Error **errp); + +void unix_start_outgoing_migration(MigrationState *s, const char *path, Error **errp); + +void fd_start_incoming_migration(const char *path, Error **errp); + +void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp); + +void migrate_fd_error(MigrationState *s); + +void migrate_fd_connect(MigrationState *s); + +int migrate_fd_close(MigrationState *s); + +void add_migration_state_change_notifier(Notifier *notify); +void remove_migration_state_change_notifier(Notifier *notify); +bool migration_is_active(MigrationState *); +bool migration_has_finished(MigrationState *); +bool migration_has_failed(MigrationState *); +MigrationState *migrate_get_current(void); + +uint64_t ram_bytes_remaining(void); +uint64_t ram_bytes_transferred(void); +uint64_t ram_bytes_total(void); + +void acct_update_position(QEMUFile *f, size_t size, bool zero); + +extern SaveVMHandlers savevm_ram_handlers; + +uint64_t dup_mig_bytes_transferred(void); +uint64_t dup_mig_pages_transferred(void); +uint64_t skipped_mig_bytes_transferred(void); +uint64_t skipped_mig_pages_transferred(void); +uint64_t norm_mig_bytes_transferred(void); +uint64_t norm_mig_pages_transferred(void); +uint64_t xbzrle_mig_bytes_transferred(void); +uint64_t 
xbzrle_mig_pages_transferred(void); +uint64_t xbzrle_mig_pages_overflow(void); +uint64_t xbzrle_mig_pages_cache_miss(void); + +/** + * @migrate_add_blocker - prevent migration from proceeding + * + * @reason - an error to be returned whenever migration is attempted + */ +void migrate_add_blocker(Error *reason); + +/** + * @migrate_del_blocker - remove a blocking error from migration + * + * @reason - the error blocking migration + */ +void migrate_del_blocker(Error *reason); + +bool migrate_rdma_pin_all(void); + +bool migrate_auto_converge(void); + +int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, + uint8_t *dst, int dlen); +int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen); + +int migrate_use_xbzrle(void); +int64_t migrate_xbzrle_cache_size(void); + +int64_t xbzrle_cache_resize(int64_t new_size); + +void ram_control_before_iterate(QEMUFile *f, uint64_t flags); +void ram_control_after_iterate(QEMUFile *f, uint64_t flags); +void ram_control_load_hook(QEMUFile *f, uint64_t flags); + +/* Whenever this is found in the data stream, the flags + * will be passed to ram_control_load_hook in the incoming-migration + * side. This lets before_ram_iterate/after_ram_iterate add + * transport-specific sections to the RAM migration data. + */ +#define RAM_SAVE_FLAG_HOOK 0x80 + +#define RAM_SAVE_CONTROL_NOT_SUPP -1000 +#define RAM_SAVE_CONTROL_DELAYED -2000 + +size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset, + ram_addr_t offset, size_t size, + int *bytes_sent); + +#endif diff --git a/contrib/qemu/include/migration/qemu-file.h b/contrib/qemu/include/migration/qemu-file.h new file mode 100644 index 00000000000..0f757fbeb63 --- /dev/null +++ b/contrib/qemu/include/migration/qemu-file.h @@ -0,0 +1,266 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef QEMU_FILE_H +#define QEMU_FILE_H 1 +#include "exec/cpu-common.h" + +/* This function writes a chunk of data to a file at the given position. + * The pos argument can be ignored if the file is only being used for + * streaming. The handler should try to write all of the data it can. + */ +typedef int (QEMUFilePutBufferFunc)(void *opaque, const uint8_t *buf, + int64_t pos, int size); + +/* Read a chunk of data from a file at the given position. The pos argument + * can be ignored if the file is only be used for streaming. The number of + * bytes actually read should be returned. 
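Returning to the migrate_add_blocker()/migrate_del_blocker() pair declared in migration.h above, a hedged sketch of typical use (hypothetical foo_* names; error_setg() and error_free() are assumed from qapi/error.h, which migration.h already includes):

static Error *foo_migration_blocker;

static void foo_block_migration(void)
{
    error_setg(&foo_migration_blocker,
               "foo device does not support live migration");
    /* The Error is reported to the user on every migration attempt. */
    migrate_add_blocker(foo_migration_blocker);
}

static void foo_unblock_migration(void)
{
    migrate_del_blocker(foo_migration_blocker);
    error_free(foo_migration_blocker);
    foo_migration_blocker = NULL;
}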
+ */ +typedef int (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf, + int64_t pos, int size); + +/* Close a file + * + * Return negative error number on error, 0 or positive value on success. + * + * The meaning of return value on success depends on the specific back-end being + * used. + */ +typedef int (QEMUFileCloseFunc)(void *opaque); + +/* Called to return the OS file descriptor associated to the QEMUFile. + */ +typedef int (QEMUFileGetFD)(void *opaque); + +/* + * This function writes an iovec to file. + */ +typedef ssize_t (QEMUFileWritevBufferFunc)(void *opaque, struct iovec *iov, + int iovcnt, int64_t pos); + +/* + * This function provides hooks around different + * stages of RAM migration. + */ +typedef int (QEMURamHookFunc)(QEMUFile *f, void *opaque, uint64_t flags); + +/* + * Constants used by ram_control_* hooks + */ +#define RAM_CONTROL_SETUP 0 +#define RAM_CONTROL_ROUND 1 +#define RAM_CONTROL_HOOK 2 +#define RAM_CONTROL_FINISH 3 + +/* + * This function allows override of where the RAM page + * is saved (such as RDMA, for example.) + */ +typedef size_t (QEMURamSaveFunc)(QEMUFile *f, void *opaque, + ram_addr_t block_offset, + ram_addr_t offset, + size_t size, + int *bytes_sent); + +typedef struct QEMUFileOps { + QEMUFilePutBufferFunc *put_buffer; + QEMUFileGetBufferFunc *get_buffer; + QEMUFileCloseFunc *close; + QEMUFileGetFD *get_fd; + QEMUFileWritevBufferFunc *writev_buffer; + QEMURamHookFunc *before_ram_iterate; + QEMURamHookFunc *after_ram_iterate; + QEMURamHookFunc *hook_ram_load; + QEMURamSaveFunc *save_page; +} QEMUFileOps; + +QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops); +QEMUFile *qemu_fopen(const char *filename, const char *mode); +QEMUFile *qemu_fdopen(int fd, const char *mode); +QEMUFile *qemu_fopen_socket(int fd, const char *mode); +QEMUFile *qemu_popen_cmd(const char *command, const char *mode); +int qemu_get_fd(QEMUFile *f); +int qemu_fclose(QEMUFile *f); +int64_t qemu_ftell(QEMUFile *f); +void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size); +void qemu_put_byte(QEMUFile *f, int v); +/* + * put_buffer without copying the buffer. + * The buffer should be available till it is sent asynchronously. 
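A minimal sketch of wiring a custom back-end into QEMUFileOps and qemu_fopen_ops() as documented above (editor's illustration; the in-memory sink built on GLib's GByteArray is an assumption, not an existing back-end):

#include <glib.h>
#include "migration/qemu-file.h"

static int mem_put_buffer(void *opaque, const uint8_t *buf, int64_t pos, int size)
{
    GByteArray *sink = opaque;

    /* Pure streaming sink: ignore 'pos', append the chunk, report it written. */
    g_byte_array_append(sink, buf, size);
    return size;
}

static const QEMUFileOps mem_write_ops = {
    .put_buffer = mem_put_buffer,
};

/* Usage sketch: QEMUFile *f = qemu_fopen_ops(g_byte_array_new(), &mem_write_ops);
 * subsequent qemu_put_buffer()/qemu_put_byte() calls reach mem_put_buffer()
 * when the file is flushed. */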
+ */ +void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size); +bool qemu_file_mode_is_not_valid(const char *mode); + +static inline void qemu_put_ubyte(QEMUFile *f, unsigned int v) +{ + qemu_put_byte(f, (int)v); +} + +#define qemu_put_sbyte qemu_put_byte + +void qemu_put_be16(QEMUFile *f, unsigned int v); +void qemu_put_be32(QEMUFile *f, unsigned int v); +void qemu_put_be64(QEMUFile *f, uint64_t v); +int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size); +int qemu_get_byte(QEMUFile *f); +void qemu_update_position(QEMUFile *f, size_t size); + +static inline unsigned int qemu_get_ubyte(QEMUFile *f) +{ + return (unsigned int)qemu_get_byte(f); +} + +#define qemu_get_sbyte qemu_get_byte + +unsigned int qemu_get_be16(QEMUFile *f); +unsigned int qemu_get_be32(QEMUFile *f); +uint64_t qemu_get_be64(QEMUFile *f); + +int qemu_file_rate_limit(QEMUFile *f); +void qemu_file_reset_rate_limit(QEMUFile *f); +void qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate); +int64_t qemu_file_get_rate_limit(QEMUFile *f); +int qemu_file_get_error(QEMUFile *f); +void qemu_fflush(QEMUFile *f); + +static inline void qemu_put_be64s(QEMUFile *f, const uint64_t *pv) +{ + qemu_put_be64(f, *pv); +} + +static inline void qemu_put_be32s(QEMUFile *f, const uint32_t *pv) +{ + qemu_put_be32(f, *pv); +} + +static inline void qemu_put_be16s(QEMUFile *f, const uint16_t *pv) +{ + qemu_put_be16(f, *pv); +} + +static inline void qemu_put_8s(QEMUFile *f, const uint8_t *pv) +{ + qemu_put_byte(f, *pv); +} + +static inline void qemu_get_be64s(QEMUFile *f, uint64_t *pv) +{ + *pv = qemu_get_be64(f); +} + +static inline void qemu_get_be32s(QEMUFile *f, uint32_t *pv) +{ + *pv = qemu_get_be32(f); +} + +static inline void qemu_get_be16s(QEMUFile *f, uint16_t *pv) +{ + *pv = qemu_get_be16(f); +} + +static inline void qemu_get_8s(QEMUFile *f, uint8_t *pv) +{ + *pv = qemu_get_byte(f); +} + +// Signed versions for type safety +static inline void qemu_put_sbuffer(QEMUFile *f, const int8_t *buf, int size) +{ + qemu_put_buffer(f, (const uint8_t *)buf, size); +} + +static inline void qemu_put_sbe16(QEMUFile *f, int v) +{ + qemu_put_be16(f, (unsigned int)v); +} + +static inline void qemu_put_sbe32(QEMUFile *f, int v) +{ + qemu_put_be32(f, (unsigned int)v); +} + +static inline void qemu_put_sbe64(QEMUFile *f, int64_t v) +{ + qemu_put_be64(f, (uint64_t)v); +} + +static inline size_t qemu_get_sbuffer(QEMUFile *f, int8_t *buf, int size) +{ + return qemu_get_buffer(f, (uint8_t *)buf, size); +} + +static inline int qemu_get_sbe16(QEMUFile *f) +{ + return (int)qemu_get_be16(f); +} + +static inline int qemu_get_sbe32(QEMUFile *f) +{ + return (int)qemu_get_be32(f); +} + +static inline int64_t qemu_get_sbe64(QEMUFile *f) +{ + return (int64_t)qemu_get_be64(f); +} + +static inline void qemu_put_s8s(QEMUFile *f, const int8_t *pv) +{ + qemu_put_8s(f, (const uint8_t *)pv); +} + +static inline void qemu_put_sbe16s(QEMUFile *f, const int16_t *pv) +{ + qemu_put_be16s(f, (const uint16_t *)pv); +} + +static inline void qemu_put_sbe32s(QEMUFile *f, const int32_t *pv) +{ + qemu_put_be32s(f, (const uint32_t *)pv); +} + +static inline void qemu_put_sbe64s(QEMUFile *f, const int64_t *pv) +{ + qemu_put_be64s(f, (const uint64_t *)pv); +} + +static inline void qemu_get_s8s(QEMUFile *f, int8_t *pv) +{ + qemu_get_8s(f, (uint8_t *)pv); +} + +static inline void qemu_get_sbe16s(QEMUFile *f, int16_t *pv) +{ + qemu_get_be16s(f, (uint16_t *)pv); +} + +static inline void qemu_get_sbe32s(QEMUFile *f, int32_t *pv) +{ + qemu_get_be32s(f, (uint32_t *)pv); +} + +static 
inline void qemu_get_sbe64s(QEMUFile *f, int64_t *pv) +{ + qemu_get_be64s(f, (uint64_t *)pv); +} +#endif diff --git a/contrib/qemu/include/migration/vmstate.h b/contrib/qemu/include/migration/vmstate.h new file mode 100644 index 00000000000..1c31b5d6fb5 --- /dev/null +++ b/contrib/qemu/include/migration/vmstate.h @@ -0,0 +1,740 @@ +/* + * QEMU migration/snapshot declarations + * + * Copyright (c) 2009-2011 Red Hat, Inc. + * + * Original author: Juan Quintela <quintela@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef QEMU_VMSTATE_H +#define QEMU_VMSTATE_H 1 + +#ifndef CONFIG_USER_ONLY +#include <migration/qemu-file.h> +#endif + +typedef void SaveStateHandler(QEMUFile *f, void *opaque); +typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id); + +typedef struct SaveVMHandlers { + /* This runs inside the iothread lock. */ + void (*set_params)(const MigrationParams *params, void * opaque); + SaveStateHandler *save_state; + + void (*cancel)(void *opaque); + int (*save_live_complete)(QEMUFile *f, void *opaque); + + /* This runs both outside and inside the iothread lock. */ + bool (*is_active)(void *opaque); + + /* This runs outside the iothread lock in the migration case, and + * within the lock in the savevm case. The callback had better only + * use data that is local to the migration thread or protected + * by other locks. + */ + int (*save_live_iterate)(QEMUFile *f, void *opaque); + + /* This runs outside the iothread lock! 
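A hedged sketch of filling in this handler table for a live-migratable component (hypothetical foo_* names; registration uses register_savevm_live(), declared just below):

static int foo_save_setup(QEMUFile *f, void *opaque)
{
    qemu_put_be32(f, 0xf00);              /* e.g. a private section marker */
    return 0;
}

static int foo_save_iterate(QEMUFile *f, void *opaque)
{
    /* Per the comment above: in the migration case this runs outside the
     * iothread lock, so only touch data owned by the migration thread or
     * protected by its own locks. */

    /* produce a chunk of state here with qemu_put_*() */
    return 0;
}

static int foo_load(QEMUFile *f, void *opaque, int version_id)
{
    return qemu_get_be32(f) == 0xf00 ? 0 : -1;
}

static SaveVMHandlers foo_handlers = {
    .save_live_setup   = foo_save_setup,
    .save_live_iterate = foo_save_iterate,
    .load_state        = foo_load,
};

/* Registered elsewhere with:
 * register_savevm_live(dev, "foo", 0, 1, &foo_handlers, foo_state); */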
*/ + int (*save_live_setup)(QEMUFile *f, void *opaque); + uint64_t (*save_live_pending)(QEMUFile *f, void *opaque, uint64_t max_size); + + LoadStateHandler *load_state; +} SaveVMHandlers; + +int register_savevm(DeviceState *dev, + const char *idstr, + int instance_id, + int version_id, + SaveStateHandler *save_state, + LoadStateHandler *load_state, + void *opaque); + +int register_savevm_live(DeviceState *dev, + const char *idstr, + int instance_id, + int version_id, + SaveVMHandlers *ops, + void *opaque); + +void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque); +void register_device_unmigratable(DeviceState *dev, const char *idstr, + void *opaque); + + +typedef struct VMStateInfo VMStateInfo; +typedef struct VMStateDescription VMStateDescription; + +struct VMStateInfo { + const char *name; + int (*get)(QEMUFile *f, void *pv, size_t size); + void (*put)(QEMUFile *f, void *pv, size_t size); +}; + +enum VMStateFlags { + VMS_SINGLE = 0x001, + VMS_POINTER = 0x002, + VMS_ARRAY = 0x004, + VMS_STRUCT = 0x008, + VMS_VARRAY_INT32 = 0x010, /* Array with size in int32_t field*/ + VMS_BUFFER = 0x020, /* static sized buffer */ + VMS_ARRAY_OF_POINTER = 0x040, + VMS_VARRAY_UINT16 = 0x080, /* Array with size in uint16_t field */ + VMS_VBUFFER = 0x100, /* Buffer with size in int32_t field */ + VMS_MULTIPLY = 0x200, /* multiply "size" field by field_size */ + VMS_VARRAY_UINT8 = 0x400, /* Array with size in uint8_t field*/ + VMS_VARRAY_UINT32 = 0x800, /* Array with size in uint32_t field*/ +}; + +typedef struct { + const char *name; + size_t offset; + size_t size; + size_t start; + int num; + size_t num_offset; + size_t size_offset; + const VMStateInfo *info; + enum VMStateFlags flags; + const VMStateDescription *vmsd; + int version_id; + bool (*field_exists)(void *opaque, int version_id); +} VMStateField; + +typedef struct VMStateSubsection { + const VMStateDescription *vmsd; + bool (*needed)(void *opaque); +} VMStateSubsection; + +struct VMStateDescription { + const char *name; + int unmigratable; + int version_id; + int minimum_version_id; + int minimum_version_id_old; + LoadStateHandler *load_state_old; + int (*pre_load)(void *opaque); + int (*post_load)(void *opaque, int version_id); + void (*pre_save)(void *opaque); + VMStateField *fields; + const VMStateSubsection *subsections; +}; + +#ifdef CONFIG_USER_ONLY +extern const VMStateDescription vmstate_dummy; +#endif + +extern const VMStateInfo vmstate_info_bool; + +extern const VMStateInfo vmstate_info_int8; +extern const VMStateInfo vmstate_info_int16; +extern const VMStateInfo vmstate_info_int32; +extern const VMStateInfo vmstate_info_int64; + +extern const VMStateInfo vmstate_info_uint8_equal; +extern const VMStateInfo vmstate_info_uint16_equal; +extern const VMStateInfo vmstate_info_int32_equal; +extern const VMStateInfo vmstate_info_uint32_equal; +extern const VMStateInfo vmstate_info_uint64_equal; +extern const VMStateInfo vmstate_info_int32_le; + +extern const VMStateInfo vmstate_info_uint8; +extern const VMStateInfo vmstate_info_uint16; +extern const VMStateInfo vmstate_info_uint32; +extern const VMStateInfo vmstate_info_uint64; + +extern const VMStateInfo vmstate_info_float64; + +extern const VMStateInfo vmstate_info_timer; +extern const VMStateInfo vmstate_info_buffer; +extern const VMStateInfo vmstate_info_unused_buffer; +extern const VMStateInfo vmstate_info_bitmap; + +#define type_check_2darray(t1,t2,n,m) ((t1(*)[n][m])0 - (t2*)0) +#define type_check_array(t1,t2,n) ((t1(*)[n])0 - (t2*)0) +#define 
type_check_pointer(t1,t2) ((t1**)0 - (t2*)0) + +#define vmstate_offset_value(_state, _field, _type) \ + (offsetof(_state, _field) + \ + type_check(_type, typeof_field(_state, _field))) + +#define vmstate_offset_pointer(_state, _field, _type) \ + (offsetof(_state, _field) + \ + type_check_pointer(_type, typeof_field(_state, _field))) + +#define vmstate_offset_array(_state, _field, _type, _num) \ + (offsetof(_state, _field) + \ + type_check_array(_type, typeof_field(_state, _field), _num)) + +#define vmstate_offset_2darray(_state, _field, _type, _n1, _n2) \ + (offsetof(_state, _field) + \ + type_check_2darray(_type, typeof_field(_state, _field), _n1, _n2)) + +#define vmstate_offset_sub_array(_state, _field, _type, _start) \ + (offsetof(_state, _field[_start])) + +#define vmstate_offset_buffer(_state, _field) \ + vmstate_offset_array(_state, _field, uint8_t, \ + sizeof(typeof_field(_state, _field))) + +#define VMSTATE_SINGLE_TEST(_field, _state, _test, _version, _info, _type) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .field_exists = (_test), \ + .size = sizeof(_type), \ + .info = &(_info), \ + .flags = VMS_SINGLE, \ + .offset = vmstate_offset_value(_state, _field, _type), \ +} + +#define VMSTATE_POINTER(_field, _state, _version, _info, _type) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_SINGLE|VMS_POINTER, \ + .offset = vmstate_offset_value(_state, _field, _type), \ +} + +#define VMSTATE_POINTER_TEST(_field, _state, _test, _info, _type) { \ + .name = (stringify(_field)), \ + .info = &(_info), \ + .field_exists = (_test), \ + .size = sizeof(_type), \ + .flags = VMS_SINGLE|VMS_POINTER, \ + .offset = vmstate_offset_value(_state, _field, _type), \ +} + +#define VMSTATE_ARRAY(_field, _state, _num, _version, _info, _type) {\ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num = (_num), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_ARRAY, \ + .offset = vmstate_offset_array(_state, _field, _type, _num), \ +} + +#define VMSTATE_2DARRAY(_field, _state, _n1, _n2, _version, _info, _type) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num = (_n1) * (_n2), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_ARRAY, \ + .offset = vmstate_offset_2darray(_state, _field, _type, _n1, _n2), \ +} + +#define VMSTATE_ARRAY_TEST(_field, _state, _num, _test, _info, _type) {\ + .name = (stringify(_field)), \ + .field_exists = (_test), \ + .num = (_num), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_ARRAY, \ + .offset = vmstate_offset_array(_state, _field, _type, _num),\ +} + +#define VMSTATE_SUB_ARRAY(_field, _state, _start, _num, _version, _info, _type) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num = (_num), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_ARRAY, \ + .offset = vmstate_offset_sub_array(_state, _field, _type, _start), \ +} + +#define VMSTATE_ARRAY_INT32_UNSAFE(_field, _state, _field_num, _info, _type) {\ + .name = (stringify(_field)), \ + .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_VARRAY_INT32, \ + .offset = offsetof(_state, _field), \ +} + +#define VMSTATE_VARRAY_INT32(_field, _state, _field_num, _version, _info, _type) {\ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \ + .info = 
&(_info), \ + .size = sizeof(_type), \ + .flags = VMS_VARRAY_INT32|VMS_POINTER, \ + .offset = vmstate_offset_pointer(_state, _field, _type), \ +} + +#define VMSTATE_VARRAY_UINT32(_field, _state, _field_num, _version, _info, _type) {\ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num_offset = vmstate_offset_value(_state, _field_num, uint32_t),\ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_VARRAY_UINT32|VMS_POINTER, \ + .offset = vmstate_offset_pointer(_state, _field, _type), \ +} + +#define VMSTATE_VARRAY_UINT16_UNSAFE(_field, _state, _field_num, _version, _info, _type) {\ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),\ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_VARRAY_UINT16, \ + .offset = offsetof(_state, _field), \ +} + +#define VMSTATE_STRUCT_TEST(_field, _state, _test, _version, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .field_exists = (_test), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .flags = VMS_STRUCT, \ + .offset = vmstate_offset_value(_state, _field, _type), \ +} + +#define VMSTATE_STRUCT_POINTER_TEST(_field, _state, _test, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .field_exists = (_test), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .flags = VMS_STRUCT|VMS_POINTER, \ + .offset = vmstate_offset_value(_state, _field, _type), \ +} + +#define VMSTATE_ARRAY_OF_POINTER(_field, _state, _num, _version, _info, _type) {\ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num = (_num), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_ARRAY|VMS_ARRAY_OF_POINTER, \ + .offset = vmstate_offset_array(_state, _field, _type, _num), \ +} + +#define VMSTATE_STRUCT_ARRAY_TEST(_field, _state, _num, _test, _version, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .num = (_num), \ + .field_exists = (_test), \ + .version_id = (_version), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .flags = VMS_STRUCT|VMS_ARRAY, \ + .offset = vmstate_offset_array(_state, _field, _type, _num),\ +} + +#define VMSTATE_STRUCT_VARRAY_UINT8(_field, _state, _field_num, _version, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .num_offset = vmstate_offset_value(_state, _field_num, uint8_t), \ + .version_id = (_version), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .flags = VMS_STRUCT|VMS_VARRAY_UINT8, \ + .offset = offsetof(_state, _field), \ +} + +#define VMSTATE_STRUCT_VARRAY_POINTER_INT32(_field, _state, _field_num, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .version_id = 0, \ + .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \ + .size = sizeof(_type), \ + .vmsd = &(_vmsd), \ + .flags = VMS_POINTER | VMS_VARRAY_INT32 | VMS_STRUCT, \ + .offset = vmstate_offset_pointer(_state, _field, _type), \ +} + +#define VMSTATE_STRUCT_VARRAY_POINTER_UINT32(_field, _state, _field_num, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .version_id = 0, \ + .num_offset = vmstate_offset_value(_state, _field_num, uint32_t),\ + .size = sizeof(_type), \ + .vmsd = &(_vmsd), \ + .flags = VMS_POINTER | VMS_VARRAY_INT32 | VMS_STRUCT, \ + .offset = vmstate_offset_pointer(_state, _field, _type), \ +} + +#define VMSTATE_STRUCT_VARRAY_POINTER_UINT16(_field, _state, _field_num, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .version_id = 0, \ + .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),\ + .size = sizeof(_type), \ + .vmsd = 
&(_vmsd), \ + .flags = VMS_POINTER | VMS_VARRAY_UINT16 | VMS_STRUCT, \ + .offset = vmstate_offset_pointer(_state, _field, _type), \ +} + +#define VMSTATE_STRUCT_VARRAY_INT32(_field, _state, _field_num, _version, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \ + .version_id = (_version), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .flags = VMS_STRUCT|VMS_VARRAY_INT32, \ + .offset = offsetof(_state, _field), \ +} + +#define VMSTATE_STRUCT_VARRAY_UINT32(_field, _state, _field_num, _version, _vmsd, _type) { \ + .name = (stringify(_field)), \ + .num_offset = vmstate_offset_value(_state, _field_num, uint32_t), \ + .version_id = (_version), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .flags = VMS_STRUCT|VMS_VARRAY_UINT32, \ + .offset = offsetof(_state, _field), \ +} + +#define VMSTATE_STATIC_BUFFER(_field, _state, _version, _test, _start, _size) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .field_exists = (_test), \ + .size = (_size - _start), \ + .info = &vmstate_info_buffer, \ + .flags = VMS_BUFFER, \ + .offset = vmstate_offset_buffer(_state, _field) + _start, \ +} + +#define VMSTATE_VBUFFER_MULTIPLY(_field, _state, _version, _test, _start, _field_size, _multiply) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .field_exists = (_test), \ + .size_offset = vmstate_offset_value(_state, _field_size, uint32_t),\ + .size = (_multiply), \ + .info = &vmstate_info_buffer, \ + .flags = VMS_VBUFFER|VMS_POINTER|VMS_MULTIPLY, \ + .offset = offsetof(_state, _field), \ + .start = (_start), \ +} + +#define VMSTATE_VBUFFER(_field, _state, _version, _test, _start, _field_size) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .field_exists = (_test), \ + .size_offset = vmstate_offset_value(_state, _field_size, int32_t),\ + .info = &vmstate_info_buffer, \ + .flags = VMS_VBUFFER|VMS_POINTER, \ + .offset = offsetof(_state, _field), \ + .start = (_start), \ +} + +#define VMSTATE_VBUFFER_UINT32(_field, _state, _version, _test, _start, _field_size) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .field_exists = (_test), \ + .size_offset = vmstate_offset_value(_state, _field_size, uint32_t),\ + .info = &vmstate_info_buffer, \ + .flags = VMS_VBUFFER|VMS_POINTER, \ + .offset = offsetof(_state, _field), \ + .start = (_start), \ +} + +#define VMSTATE_BUFFER_UNSAFE_INFO(_field, _state, _version, _info, _size) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .size = (_size), \ + .info = &(_info), \ + .flags = VMS_BUFFER, \ + .offset = offsetof(_state, _field), \ +} + +#define VMSTATE_BUFFER_POINTER_UNSAFE(_field, _state, _version, _size) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .size = (_size), \ + .info = &vmstate_info_buffer, \ + .flags = VMS_BUFFER|VMS_POINTER, \ + .offset = offsetof(_state, _field), \ +} + +#define VMSTATE_UNUSED_BUFFER(_test, _version, _size) { \ + .name = "unused", \ + .field_exists = (_test), \ + .version_id = (_version), \ + .size = (_size), \ + .info = &vmstate_info_unused_buffer, \ + .flags = VMS_BUFFER, \ +} + +/* _field_size should be a int32_t field in the _state struct giving the + * size of the bitmap _field in bits. 
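+ *
+ * Illustrative sketch only (FooState and its members are hypothetical,
+ * not part of this header):
+ *
+ *   typedef struct FooState {
+ *       int32_t map_nbits;
+ *       unsigned long *map;
+ *   } FooState;
+ *
+ * and, inside the device's VMStateDescription .fields array:
+ *
+ *   VMSTATE_BITMAP(map, FooState, 1, map_nbits),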
+ */ +#define VMSTATE_BITMAP(_field, _state, _version, _field_size) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .size_offset = vmstate_offset_value(_state, _field_size, int32_t),\ + .info = &vmstate_info_bitmap, \ + .flags = VMS_VBUFFER|VMS_POINTER, \ + .offset = offsetof(_state, _field), \ +} + +/* _f : field name + _f_n : num of elements field_name + _n : num of elements + _s : struct state name + _v : version +*/ + +#define VMSTATE_SINGLE(_field, _state, _version, _info, _type) \ + VMSTATE_SINGLE_TEST(_field, _state, NULL, _version, _info, _type) + +#define VMSTATE_STRUCT(_field, _state, _version, _vmsd, _type) \ + VMSTATE_STRUCT_TEST(_field, _state, NULL, _version, _vmsd, _type) + +#define VMSTATE_STRUCT_POINTER(_field, _state, _vmsd, _type) \ + VMSTATE_STRUCT_POINTER_TEST(_field, _state, NULL, _vmsd, _type) + +#define VMSTATE_STRUCT_ARRAY(_field, _state, _num, _version, _vmsd, _type) \ + VMSTATE_STRUCT_ARRAY_TEST(_field, _state, _num, NULL, _version, \ + _vmsd, _type) + +#define VMSTATE_BOOL_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_bool, bool) + +#define VMSTATE_INT8_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int8, int8_t) +#define VMSTATE_INT16_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int16, int16_t) +#define VMSTATE_INT32_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int32, int32_t) +#define VMSTATE_INT64_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int64, int64_t) + +#define VMSTATE_UINT8_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint8, uint8_t) +#define VMSTATE_UINT16_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint16, uint16_t) +#define VMSTATE_UINT32_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint32, uint32_t) +#define VMSTATE_UINT64_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, uint64_t) + +#define VMSTATE_BOOL(_f, _s) \ + VMSTATE_BOOL_V(_f, _s, 0) + +#define VMSTATE_INT8(_f, _s) \ + VMSTATE_INT8_V(_f, _s, 0) +#define VMSTATE_INT16(_f, _s) \ + VMSTATE_INT16_V(_f, _s, 0) +#define VMSTATE_INT32(_f, _s) \ + VMSTATE_INT32_V(_f, _s, 0) +#define VMSTATE_INT64(_f, _s) \ + VMSTATE_INT64_V(_f, _s, 0) + +#define VMSTATE_UINT8(_f, _s) \ + VMSTATE_UINT8_V(_f, _s, 0) +#define VMSTATE_UINT16(_f, _s) \ + VMSTATE_UINT16_V(_f, _s, 0) +#define VMSTATE_UINT32(_f, _s) \ + VMSTATE_UINT32_V(_f, _s, 0) +#define VMSTATE_UINT64(_f, _s) \ + VMSTATE_UINT64_V(_f, _s, 0) + +#define VMSTATE_UINT8_EQUAL(_f, _s) \ + VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint8_equal, uint8_t) + +#define VMSTATE_UINT16_EQUAL(_f, _s) \ + VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint16_equal, uint16_t) + +#define VMSTATE_UINT16_EQUAL_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint16_equal, uint16_t) + +#define VMSTATE_INT32_EQUAL(_f, _s) \ + VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_equal, int32_t) + +#define VMSTATE_UINT32_EQUAL_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint32_equal, uint32_t) + +#define VMSTATE_UINT32_EQUAL(_f, _s) \ + VMSTATE_UINT32_EQUAL_V(_f, _s, 0) + +#define VMSTATE_UINT64_EQUAL_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64_equal, uint64_t) + +#define VMSTATE_UINT64_EQUAL(_f, _s) \ + VMSTATE_UINT64_EQUAL_V(_f, _s, 0) + +#define VMSTATE_INT32_LE(_f, _s) \ + VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_le, int32_t) + +#define VMSTATE_UINT8_TEST(_f, _s, _t) \ + VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint8, uint8_t) + +#define VMSTATE_UINT16_TEST(_f, _s, _t) \ + VMSTATE_SINGLE_TEST(_f, _s, 
_t, 0, vmstate_info_uint16, uint16_t) + +#define VMSTATE_UINT32_TEST(_f, _s, _t) \ + VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint32, uint32_t) + + +#define VMSTATE_FLOAT64_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_float64, float64) + +#define VMSTATE_FLOAT64(_f, _s) \ + VMSTATE_FLOAT64_V(_f, _s, 0) + +#define VMSTATE_TIMER_TEST(_f, _s, _test) \ + VMSTATE_POINTER_TEST(_f, _s, _test, vmstate_info_timer, QEMUTimer *) + +#define VMSTATE_TIMER_V(_f, _s, _v) \ + VMSTATE_POINTER(_f, _s, _v, vmstate_info_timer, QEMUTimer *) + +#define VMSTATE_TIMER(_f, _s) \ + VMSTATE_TIMER_V(_f, _s, 0) + +#define VMSTATE_TIMER_ARRAY(_f, _s, _n) \ + VMSTATE_ARRAY_OF_POINTER(_f, _s, _n, 0, vmstate_info_timer, QEMUTimer *) + +#define VMSTATE_BOOL_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_bool, bool) + +#define VMSTATE_BOOL_ARRAY(_f, _s, _n) \ + VMSTATE_BOOL_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_UINT16_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint16, uint16_t) + +#define VMSTATE_UINT16_2DARRAY_V(_f, _s, _n1, _n2, _v) \ + VMSTATE_2DARRAY(_f, _s, _n1, _n2, _v, vmstate_info_uint16, uint16_t) + +#define VMSTATE_UINT16_ARRAY(_f, _s, _n) \ + VMSTATE_UINT16_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_UINT16_2DARRAY(_f, _s, _n1, _n2) \ + VMSTATE_UINT16_2DARRAY_V(_f, _s, _n1, _n2, 0) + +#define VMSTATE_UINT8_2DARRAY_V(_f, _s, _n1, _n2, _v) \ + VMSTATE_2DARRAY(_f, _s, _n1, _n2, _v, vmstate_info_uint8, uint8_t) + +#define VMSTATE_UINT8_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint8, uint8_t) + +#define VMSTATE_UINT8_ARRAY(_f, _s, _n) \ + VMSTATE_UINT8_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_UINT8_2DARRAY(_f, _s, _n1, _n2) \ + VMSTATE_UINT8_2DARRAY_V(_f, _s, _n1, _n2, 0) + +#define VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint32, uint32_t) + +#define VMSTATE_UINT32_ARRAY(_f, _s, _n) \ + VMSTATE_UINT32_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint64, uint64_t) + +#define VMSTATE_UINT64_ARRAY(_f, _s, _n) \ + VMSTATE_UINT64_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_INT16_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int16, int16_t) + +#define VMSTATE_INT16_ARRAY(_f, _s, _n) \ + VMSTATE_INT16_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_INT32_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int32, int32_t) + +#define VMSTATE_INT32_ARRAY(_f, _s, _n) \ + VMSTATE_INT32_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_UINT32_SUB_ARRAY(_f, _s, _start, _num) \ + VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_uint32, uint32_t) + +#define VMSTATE_UINT32_ARRAY(_f, _s, _n) \ + VMSTATE_UINT32_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_INT64_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int64, int64_t) + +#define VMSTATE_INT64_ARRAY(_f, _s, _n) \ + VMSTATE_INT64_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_FLOAT64_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_float64, float64) + +#define VMSTATE_FLOAT64_ARRAY(_f, _s, _n) \ + VMSTATE_FLOAT64_ARRAY_V(_f, _s, _n, 0) + +#define VMSTATE_BUFFER_V(_f, _s, _v) \ + VMSTATE_STATIC_BUFFER(_f, _s, _v, NULL, 0, sizeof(typeof_field(_s, _f))) + +#define VMSTATE_BUFFER(_f, _s) \ + VMSTATE_BUFFER_V(_f, _s, 0) + +#define VMSTATE_PARTIAL_BUFFER(_f, _s, _size) \ + VMSTATE_STATIC_BUFFER(_f, _s, 0, NULL, 0, _size) + +#define VMSTATE_BUFFER_START_MIDDLE(_f, _s, _start) \ + 
VMSTATE_STATIC_BUFFER(_f, _s, 0, NULL, _start, sizeof(typeof_field(_s, _f))) + +#define VMSTATE_PARTIAL_VBUFFER(_f, _s, _size) \ + VMSTATE_VBUFFER(_f, _s, 0, NULL, 0, _size) + +#define VMSTATE_PARTIAL_VBUFFER_UINT32(_f, _s, _size) \ + VMSTATE_VBUFFER_UINT32(_f, _s, 0, NULL, 0, _size) + +#define VMSTATE_SUB_VBUFFER(_f, _s, _start, _size) \ + VMSTATE_VBUFFER(_f, _s, 0, NULL, _start, _size) + +#define VMSTATE_BUFFER_TEST(_f, _s, _test) \ + VMSTATE_STATIC_BUFFER(_f, _s, 0, _test, 0, sizeof(typeof_field(_s, _f))) + +#define VMSTATE_BUFFER_UNSAFE(_field, _state, _version, _size) \ + VMSTATE_BUFFER_UNSAFE_INFO(_field, _state, _version, vmstate_info_buffer, _size) + +#define VMSTATE_UNUSED_V(_v, _size) \ + VMSTATE_UNUSED_BUFFER(NULL, _v, _size) + +#define VMSTATE_UNUSED(_size) \ + VMSTATE_UNUSED_V(0, _size) + +#define VMSTATE_UNUSED_TEST(_test, _size) \ + VMSTATE_UNUSED_BUFFER(_test, 0, _size) + +#define VMSTATE_END_OF_LIST() \ + {} + +int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, + void *opaque, int version_id); +void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd, + void *opaque); + +int vmstate_register_with_alias_id(DeviceState *dev, int instance_id, + const VMStateDescription *vmsd, + void *base, int alias_id, + int required_for_version); + +static inline int vmstate_register(DeviceState *dev, int instance_id, + const VMStateDescription *vmsd, + void *opaque) +{ + return vmstate_register_with_alias_id(dev, instance_id, vmsd, + opaque, -1, 0); +} + +void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd, + void *opaque); + +struct MemoryRegion; +void vmstate_register_ram(struct MemoryRegion *memory, DeviceState *dev); +void vmstate_unregister_ram(struct MemoryRegion *memory, DeviceState *dev); +void vmstate_register_ram_global(struct MemoryRegion *memory); + +#endif diff --git a/contrib/qemu/include/monitor/monitor.h b/contrib/qemu/include/monitor/monitor.h new file mode 100644 index 00000000000..1942cc42fe2 --- /dev/null +++ b/contrib/qemu/include/monitor/monitor.h @@ -0,0 +1,104 @@ +#ifndef MONITOR_H +#define MONITOR_H + +#include "qemu-common.h" +#include "qapi/qmp/qerror.h" +#include "qapi/qmp/qdict.h" +#include "block/block.h" +#include "monitor/readline.h" + +extern Monitor *cur_mon; +extern Monitor *default_mon; + +/* flags for monitor_init */ +#define MONITOR_IS_DEFAULT 0x01 +#define MONITOR_USE_READLINE 0x02 +#define MONITOR_USE_CONTROL 0x04 +#define MONITOR_USE_PRETTY 0x08 + +/* flags for monitor commands */ +#define MONITOR_CMD_ASYNC 0x0001 + +/* QMP events */ +typedef enum MonitorEvent { + QEVENT_SHUTDOWN, + QEVENT_RESET, + QEVENT_POWERDOWN, + QEVENT_STOP, + QEVENT_RESUME, + QEVENT_VNC_CONNECTED, + QEVENT_VNC_INITIALIZED, + QEVENT_VNC_DISCONNECTED, + QEVENT_BLOCK_IO_ERROR, + QEVENT_RTC_CHANGE, + QEVENT_WATCHDOG, + QEVENT_SPICE_CONNECTED, + QEVENT_SPICE_INITIALIZED, + QEVENT_SPICE_DISCONNECTED, + QEVENT_BLOCK_JOB_COMPLETED, + QEVENT_BLOCK_JOB_CANCELLED, + QEVENT_BLOCK_JOB_ERROR, + QEVENT_BLOCK_JOB_READY, + QEVENT_DEVICE_DELETED, + QEVENT_DEVICE_TRAY_MOVED, + QEVENT_NIC_RX_FILTER_CHANGED, + QEVENT_SUSPEND, + QEVENT_SUSPEND_DISK, + QEVENT_WAKEUP, + QEVENT_BALLOON_CHANGE, + QEVENT_SPICE_MIGRATE_COMPLETED, + QEVENT_GUEST_PANICKED, + + /* Add to 'monitor_event_names' array in monitor.c when + * defining new events here */ + + QEVENT_MAX, +} MonitorEvent; + +int monitor_cur_is_qmp(void); + +void monitor_protocol_event(MonitorEvent event, QObject *data); +void monitor_init(CharDriverState *chr, int flags); + +int 
monitor_suspend(Monitor *mon); +void monitor_resume(Monitor *mon); + +int monitor_read_bdrv_key_start(Monitor *mon, BlockDriverState *bs, + BlockDriverCompletionFunc *completion_cb, + void *opaque); +int monitor_read_block_device_key(Monitor *mon, const char *device, + BlockDriverCompletionFunc *completion_cb, + void *opaque); + +int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp); +int monitor_handle_fd_param(Monitor *mon, const char *fdname); + +void monitor_vprintf(Monitor *mon, const char *fmt, va_list ap) + GCC_FMT_ATTR(2, 0); +void monitor_printf(Monitor *mon, const char *fmt, ...) GCC_FMT_ATTR(2, 3); +void monitor_print_filename(Monitor *mon, const char *filename); +void monitor_flush(Monitor *mon); +int monitor_set_cpu(int cpu_index); +int monitor_get_cpu_index(void); + +typedef void (MonitorCompletion)(void *opaque, QObject *ret_data); + +void monitor_set_error(Monitor *mon, QError *qerror); +void monitor_read_command(Monitor *mon, int show_prompt); +ReadLineState *monitor_get_rs(Monitor *mon); +int monitor_read_password(Monitor *mon, ReadLineFunc *readline_func, + void *opaque); + +int qmp_qom_set(Monitor *mon, const QDict *qdict, QObject **ret); + +int qmp_qom_get(Monitor *mon, const QDict *qdict, QObject **ret); + +AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id, + bool has_opaque, const char *opaque, + Error **errp); +int monitor_fdset_get_fd(int64_t fdset_id, int flags); +int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd); +int monitor_fdset_dup_fd_remove(int dup_fd); +int monitor_fdset_dup_fd_find(int dup_fd); + +#endif /* !MONITOR_H */ diff --git a/contrib/qemu/include/monitor/readline.h b/contrib/qemu/include/monitor/readline.h new file mode 100644 index 00000000000..fc9806ecf1a --- /dev/null +++ b/contrib/qemu/include/monitor/readline.h @@ -0,0 +1,55 @@ +#ifndef READLINE_H +#define READLINE_H + +#include "qemu-common.h" + +#define READLINE_CMD_BUF_SIZE 4095 +#define READLINE_MAX_CMDS 64 +#define READLINE_MAX_COMPLETIONS 256 + +typedef void ReadLineFunc(Monitor *mon, const char *str, void *opaque); +typedef void ReadLineCompletionFunc(const char *cmdline); + +typedef struct ReadLineState { + char cmd_buf[READLINE_CMD_BUF_SIZE + 1]; + int cmd_buf_index; + int cmd_buf_size; + + char last_cmd_buf[READLINE_CMD_BUF_SIZE + 1]; + int last_cmd_buf_index; + int last_cmd_buf_size; + + int esc_state; + int esc_param; + + char *history[READLINE_MAX_CMDS]; + int hist_entry; + + ReadLineCompletionFunc *completion_finder; + char *completions[READLINE_MAX_COMPLETIONS]; + int nb_completions; + int completion_index; + + ReadLineFunc *readline_func; + void *readline_opaque; + int read_password; + char prompt[256]; + Monitor *mon; +} ReadLineState; + +void readline_add_completion(ReadLineState *rs, const char *str); +void readline_set_completion_index(ReadLineState *rs, int completion_index); + +const char *readline_get_history(ReadLineState *rs, unsigned int index); + +void readline_handle_byte(ReadLineState *rs, int ch); + +void readline_start(ReadLineState *rs, const char *prompt, int read_password, + ReadLineFunc *readline_func, void *opaque); +void readline_restart(ReadLineState *rs); +void readline_show_prompt(ReadLineState *rs); + +ReadLineState *readline_init(Monitor *mon, + ReadLineCompletionFunc *completion_finder); + +#endif /* !READLINE_H */ diff --git a/contrib/qemu/include/qapi/error.h b/contrib/qemu/include/qapi/error.h new file mode 100644 index 00000000000..ffd1cea4772 --- /dev/null +++ b/contrib/qemu/include/qapi/error.h @@ 
-0,0 +1,85 @@ +/* + * QEMU Error Objects + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2. See + * the COPYING.LIB file in the top-level directory. + */ +#ifndef ERROR_H +#define ERROR_H + +#include "qemu/compiler.h" +#include "qapi-types.h" +#include <stdbool.h> + +/** + * A class representing internal errors within QEMU. An error has a ErrorClass + * code and a human message. + */ +typedef struct Error Error; + +/** + * Set an indirect pointer to an error given a ErrorClass value and a + * printf-style human message. This function is not meant to be used outside + * of QEMU. + */ +void error_set(Error **err, ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(3, 4); + +/** + * Set an indirect pointer to an error given a ErrorClass value and a + * printf-style human message, followed by a strerror() string if + * @os_error is not zero. + */ +void error_set_errno(Error **err, int os_error, ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(4, 5); + +/** + * Same as error_set(), but sets a generic error + */ +#define error_setg(err, fmt, ...) \ + error_set(err, ERROR_CLASS_GENERIC_ERROR, fmt, ## __VA_ARGS__) +#define error_setg_errno(err, os_error, fmt, ...) \ + error_set_errno(err, os_error, ERROR_CLASS_GENERIC_ERROR, fmt, ## __VA_ARGS__) + +/** + * Helper for open() errors + */ +void error_setg_file_open(Error **errp, int os_errno, const char *filename); + +/** + * Returns true if an indirect pointer to an error is pointing to a valid + * error object. + */ +bool error_is_set(Error **err); + +/* + * Get the error class of an error object. + */ +ErrorClass error_get_class(const Error *err); + +/** + * Returns an exact copy of the error passed as an argument. + */ +Error *error_copy(const Error *err); + +/** + * Get a human readable representation of an error object. + */ +const char *error_get_pretty(Error *err); + +/** + * Propagate an error to an indirect pointer to an error. This function will + * always transfer ownership of the error reference and handles the case where + * dst_err is NULL correctly. Errors after the first are discarded. + */ +void error_propagate(Error **dst_err, Error *local_err); + +/** + * Free an error object. + */ +void error_free(Error *err); + +#endif diff --git a/contrib/qemu/include/qapi/qmp/json-lexer.h b/contrib/qemu/include/qapi/qmp/json-lexer.h new file mode 100644 index 00000000000..cdff0460a83 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/json-lexer.h @@ -0,0 +1,51 @@ +/* + * JSON lexer + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
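+ *
+ * A typical driving sequence for the lexer declared below; my_emit is a
+ * caller-supplied JSONLexerEmitter callback and is purely illustrative:
+ *
+ *   JSONLexer lexer;
+ *   json_lexer_init(&lexer, my_emit);
+ *   json_lexer_feed(&lexer, buf, len);
+ *   json_lexer_flush(&lexer);
+ *   json_lexer_destroy(&lexer);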
+ * + */ + +#ifndef QEMU_JSON_LEXER_H +#define QEMU_JSON_LEXER_H + +#include "qapi/qmp/qstring.h" +#include "qapi/qmp/qlist.h" + +typedef enum json_token_type { + JSON_OPERATOR = 100, + JSON_INTEGER, + JSON_FLOAT, + JSON_KEYWORD, + JSON_STRING, + JSON_ESCAPE, + JSON_SKIP, + JSON_ERROR, +} JSONTokenType; + +typedef struct JSONLexer JSONLexer; + +typedef void (JSONLexerEmitter)(JSONLexer *, QString *, JSONTokenType, int x, int y); + +struct JSONLexer +{ + JSONLexerEmitter *emit; + int state; + QString *token; + int x, y; +}; + +void json_lexer_init(JSONLexer *lexer, JSONLexerEmitter func); + +int json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size); + +int json_lexer_flush(JSONLexer *lexer); + +void json_lexer_destroy(JSONLexer *lexer); + +#endif diff --git a/contrib/qemu/include/qapi/qmp/json-parser.h b/contrib/qemu/include/qapi/qmp/json-parser.h new file mode 100644 index 00000000000..44d88f34685 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/json-parser.h @@ -0,0 +1,24 @@ +/* + * JSON Parser + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QEMU_JSON_PARSER_H +#define QEMU_JSON_PARSER_H + +#include "qemu-common.h" +#include "qapi/qmp/qlist.h" +#include "qapi/error.h" + +QObject *json_parser_parse(QList *tokens, va_list *ap); +QObject *json_parser_parse_err(QList *tokens, va_list *ap, Error **errp); + +#endif diff --git a/contrib/qemu/include/qapi/qmp/json-streamer.h b/contrib/qemu/include/qapi/qmp/json-streamer.h new file mode 100644 index 00000000000..823f7d7fa42 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/json-streamer.h @@ -0,0 +1,40 @@ +/* + * JSON streaming support + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QEMU_JSON_STREAMER_H +#define QEMU_JSON_STREAMER_H + +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/json-lexer.h" + +typedef struct JSONMessageParser +{ + void (*emit)(struct JSONMessageParser *parser, QList *tokens); + JSONLexer lexer; + int brace_count; + int bracket_count; + QList *tokens; + uint64_t token_size; +} JSONMessageParser; + +void json_message_parser_init(JSONMessageParser *parser, + void (*func)(JSONMessageParser *, QList *)); + +int json_message_parser_feed(JSONMessageParser *parser, + const char *buffer, size_t size); + +int json_message_parser_flush(JSONMessageParser *parser); + +void json_message_parser_destroy(JSONMessageParser *parser); + +#endif diff --git a/contrib/qemu/include/qapi/qmp/qbool.h b/contrib/qemu/include/qapi/qmp/qbool.h new file mode 100644 index 00000000000..c4eaab9bb9f --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qbool.h @@ -0,0 +1,29 @@ +/* + * QBool Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
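+ *
+ * Minimal illustrative use of the API declared below (error handling
+ * omitted); QDECREF comes from qobject.h:
+ *
+ *   QBool *b = qbool_from_int(1);
+ *   if (qbool_get_int(b)) {
+ *       ...
+ *   }
+ *   QDECREF(b);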
+ * + */ + +#ifndef QBOOL_H +#define QBOOL_H + +#include <stdint.h> +#include "qapi/qmp/qobject.h" + +typedef struct QBool { + QObject_HEAD; + int value; +} QBool; + +QBool *qbool_from_int(int value); +int qbool_get_int(const QBool *qb); +QBool *qobject_to_qbool(const QObject *obj); + +#endif /* QBOOL_H */ diff --git a/contrib/qemu/include/qapi/qmp/qdict.h b/contrib/qemu/include/qapi/qmp/qdict.h new file mode 100644 index 00000000000..685b2e3fcbb --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qdict.h @@ -0,0 +1,69 @@ +/* + * QDict Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QDICT_H +#define QDICT_H + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qlist.h" +#include "qemu/queue.h" +#include <stdint.h> + +#define QDICT_BUCKET_MAX 512 + +typedef struct QDictEntry { + char *key; + QObject *value; + QLIST_ENTRY(QDictEntry) next; +} QDictEntry; + +typedef struct QDict { + QObject_HEAD; + size_t size; + QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX]; +} QDict; + +/* Object API */ +QDict *qdict_new(void); +const char *qdict_entry_key(const QDictEntry *entry); +QObject *qdict_entry_value(const QDictEntry *entry); +size_t qdict_size(const QDict *qdict); +void qdict_put_obj(QDict *qdict, const char *key, QObject *value); +void qdict_del(QDict *qdict, const char *key); +int qdict_haskey(const QDict *qdict, const char *key); +QObject *qdict_get(const QDict *qdict, const char *key); +QDict *qobject_to_qdict(const QObject *obj); +void qdict_iter(const QDict *qdict, + void (*iter)(const char *key, QObject *obj, void *opaque), + void *opaque); +const QDictEntry *qdict_first(const QDict *qdict); +const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry); + +/* Helper to qdict_put_obj(), accepts any object */ +#define qdict_put(qdict, key, obj) \ + qdict_put_obj(qdict, key, QOBJECT(obj)) + +/* High level helpers */ +double qdict_get_double(const QDict *qdict, const char *key); +int64_t qdict_get_int(const QDict *qdict, const char *key); +int qdict_get_bool(const QDict *qdict, const char *key); +QList *qdict_get_qlist(const QDict *qdict, const char *key); +QDict *qdict_get_qdict(const QDict *qdict, const char *key); +const char *qdict_get_str(const QDict *qdict, const char *key); +int64_t qdict_get_try_int(const QDict *qdict, const char *key, + int64_t def_value); +int qdict_get_try_bool(const QDict *qdict, const char *key, int def_value); +const char *qdict_get_try_str(const QDict *qdict, const char *key); + +QDict *qdict_clone_shallow(const QDict *src); + +#endif /* QDICT_H */ diff --git a/contrib/qemu/include/qapi/qmp/qerror.h b/contrib/qemu/include/qapi/qmp/qerror.h new file mode 100644 index 00000000000..c30c2f6d7a3 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qerror.h @@ -0,0 +1,249 @@ +/* + * QError Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
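+ *
+ * The QERR_ macros below expand to an ErrorClass plus a format string,
+ * so an illustrative call site looks like:
+ *
+ *   qerror_report(QERR_DEVICE_NOT_FOUND, "ide1-cd0");
+ *   error_set(errp, QERR_INVALID_PARAMETER_VALUE, "mode", "a valid mode");
+ *
+ * (errp here stands for a hypothetical Error ** the caller received.)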
+ */ +#ifndef QERROR_H +#define QERROR_H + +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qstring.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi-types.h" +#include <stdarg.h> + +typedef struct QError { + QObject_HEAD; + Location loc; + char *err_msg; + ErrorClass err_class; +} QError; + +QString *qerror_human(const QError *qerror); +void qerror_report(ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(2, 3); +void qerror_report_err(Error *err); +void assert_no_error(Error *err); + +/* + * QError class list + * Please keep the definitions in alphabetical order. + * Use scripts/check-qerror.sh to check. + */ +#define QERR_ADD_CLIENT_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Could not add client" + +#define QERR_AMBIGUOUS_PATH \ + ERROR_CLASS_GENERIC_ERROR, "Path '%s' does not uniquely identify an object" + +#define QERR_BAD_BUS_FOR_DEVICE \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' can't go on a %s bus" + +#define QERR_BASE_NOT_FOUND \ + ERROR_CLASS_GENERIC_ERROR, "Base '%s' not found" + +#define QERR_BLOCK_JOB_NOT_ACTIVE \ + ERROR_CLASS_DEVICE_NOT_ACTIVE, "No active block job on device '%s'" + +#define QERR_BLOCK_JOB_PAUSED \ + ERROR_CLASS_GENERIC_ERROR, "The block job for device '%s' is currently paused" + +#define QERR_BLOCK_JOB_NOT_READY \ + ERROR_CLASS_GENERIC_ERROR, "The active block job for device '%s' cannot be completed" + +#define QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED \ + ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by device '%s' does not support feature '%s'" + +#define QERR_BUFFER_OVERRUN \ + ERROR_CLASS_GENERIC_ERROR, "An internal buffer overran" + +#define QERR_BUS_NO_HOTPLUG \ + ERROR_CLASS_GENERIC_ERROR, "Bus '%s' does not support hotplugging" + +#define QERR_BUS_NOT_FOUND \ + ERROR_CLASS_GENERIC_ERROR, "Bus '%s' not found" + +#define QERR_COMMAND_DISABLED \ + ERROR_CLASS_GENERIC_ERROR, "The command %s has been disabled for this instance" + +#define QERR_COMMAND_NOT_FOUND \ + ERROR_CLASS_COMMAND_NOT_FOUND, "The command %s has not been found" + +#define QERR_DEVICE_ENCRYPTED \ + ERROR_CLASS_DEVICE_ENCRYPTED, "'%s' (%s) is encrypted" + +#define QERR_DEVICE_FEATURE_BLOCKS_MIGRATION \ + ERROR_CLASS_GENERIC_ERROR, "Migration is disabled when using feature '%s' in device '%s'" + +#define QERR_DEVICE_HAS_NO_MEDIUM \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no medium" + +#define QERR_DEVICE_INIT_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' could not be initialized" + +#define QERR_DEVICE_IN_USE \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is in use" + +#define QERR_DEVICE_IS_READ_ONLY \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is read only" + +#define QERR_DEVICE_LOCKED \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is locked" + +#define QERR_DEVICE_MULTIPLE_BUSSES \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' has multiple child busses" + +#define QERR_DEVICE_NO_BUS \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no child bus" + +#define QERR_DEVICE_NO_HOTPLUG \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' does not support hotplugging" + +#define QERR_DEVICE_NOT_ACTIVE \ + ERROR_CLASS_DEVICE_NOT_ACTIVE, "Device '%s' has not been activated" + +#define QERR_DEVICE_NOT_ENCRYPTED \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not encrypted" + +#define QERR_DEVICE_NOT_FOUND \ + ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found" + +#define QERR_DEVICE_NOT_REMOVABLE \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not removable" + +#define QERR_DUPLICATE_ID \ + ERROR_CLASS_GENERIC_ERROR, "Duplicate ID '%s' for %s" + +#define QERR_FD_NOT_FOUND \ 
+ ERROR_CLASS_GENERIC_ERROR, "File descriptor named '%s' not found" + +#define QERR_FD_NOT_SUPPLIED \ + ERROR_CLASS_GENERIC_ERROR, "No file descriptor supplied via SCM_RIGHTS" + +#define QERR_FEATURE_DISABLED \ + ERROR_CLASS_GENERIC_ERROR, "The feature '%s' is not enabled" + +#define QERR_INVALID_BLOCK_FORMAT \ + ERROR_CLASS_GENERIC_ERROR, "Invalid block format '%s'" + +#define QERR_INVALID_OPTION_GROUP \ + ERROR_CLASS_GENERIC_ERROR, "There is no option group '%s'" + +#define QERR_INVALID_PARAMETER \ + ERROR_CLASS_GENERIC_ERROR, "Invalid parameter '%s'" + +#define QERR_INVALID_PARAMETER_COMBINATION \ + ERROR_CLASS_GENERIC_ERROR, "Invalid parameter combination" + +#define QERR_INVALID_PARAMETER_TYPE \ + ERROR_CLASS_GENERIC_ERROR, "Invalid parameter type for '%s', expected: %s" + +#define QERR_INVALID_PARAMETER_VALUE \ + ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' expects %s" + +#define QERR_INVALID_PASSWORD \ + ERROR_CLASS_GENERIC_ERROR, "Password incorrect" + +#define QERR_IO_ERROR \ + ERROR_CLASS_GENERIC_ERROR, "An IO error has occurred" + +#define QERR_JSON_PARSE_ERROR \ + ERROR_CLASS_GENERIC_ERROR, "JSON parse error, %s" + +#define QERR_JSON_PARSING \ + ERROR_CLASS_GENERIC_ERROR, "Invalid JSON syntax" + +#define QERR_KVM_MISSING_CAP \ + ERROR_CLASS_K_V_M_MISSING_CAP, "Using KVM without %s, %s unavailable" + +#define QERR_MIGRATION_ACTIVE \ + ERROR_CLASS_GENERIC_ERROR, "There's a migration process in progress" + +#define QERR_MIGRATION_NOT_SUPPORTED \ + ERROR_CLASS_GENERIC_ERROR, "State blocked by non-migratable device '%s'" + +#define QERR_MISSING_PARAMETER \ + ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' is missing" + +#define QERR_NO_BUS_FOR_DEVICE \ + ERROR_CLASS_GENERIC_ERROR, "No '%s' bus found for device '%s'" + +#define QERR_NOT_SUPPORTED \ + ERROR_CLASS_GENERIC_ERROR, "Not supported" + +#define QERR_PERMISSION_DENIED \ + ERROR_CLASS_GENERIC_ERROR, "Insufficient permission to perform this operation" + +#define QERR_PROPERTY_NOT_FOUND \ + ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' not found" + +#define QERR_PROPERTY_VALUE_BAD \ + ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' doesn't take value '%s'" + +#define QERR_PROPERTY_VALUE_IN_USE \ + ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' can't take value '%s', it's in use" + +#define QERR_PROPERTY_VALUE_NOT_FOUND \ + ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' can't find value '%s'" + +#define QERR_PROPERTY_VALUE_NOT_POWER_OF_2 \ + ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value '%" PRId64 "', it's not a power of 2" + +#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \ + ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" + +#define QERR_QGA_COMMAND_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Guest agent command failed, error was '%s'" + +#define QERR_QGA_LOGGING_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Guest agent failed to log non-optional log statement" + +#define QERR_QMP_BAD_INPUT_OBJECT \ + ERROR_CLASS_GENERIC_ERROR, "Expected '%s' in QMP input" + +#define QERR_QMP_BAD_INPUT_OBJECT_MEMBER \ + ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' expects '%s'" + +#define QERR_QMP_EXTRA_MEMBER \ + ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' is unexpected" + +#define QERR_RESET_REQUIRED \ + ERROR_CLASS_GENERIC_ERROR, "Resetting the Virtual Machine is required" + +#define QERR_SET_PASSWD_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Could not set password" + +#define QERR_TOO_MANY_FILES \ + ERROR_CLASS_GENERIC_ERROR, "Too many open files" + +#define 
QERR_UNDEFINED_ERROR \ + ERROR_CLASS_GENERIC_ERROR, "An undefined error has occurred" + +#define QERR_UNKNOWN_BLOCK_FORMAT_FEATURE \ + ERROR_CLASS_GENERIC_ERROR, "'%s' uses a %s feature which is not supported by this qemu version: %s" + +#define QERR_UNSUPPORTED \ + ERROR_CLASS_GENERIC_ERROR, "this feature or command is not currently supported" + +#define QERR_VIRTFS_FEATURE_BLOCKS_MIGRATION \ + ERROR_CLASS_GENERIC_ERROR, "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'" + +#define QERR_SOCKET_CONNECT_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Failed to connect to socket" + +#define QERR_SOCKET_LISTEN_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Failed to set socket to listening mode" + +#define QERR_SOCKET_BIND_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Failed to bind socket" + +#define QERR_SOCKET_CREATE_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Failed to create socket" + +#endif /* QERROR_H */ diff --git a/contrib/qemu/include/qapi/qmp/qfloat.h b/contrib/qemu/include/qapi/qmp/qfloat.h new file mode 100644 index 00000000000..a8658443dc2 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qfloat.h @@ -0,0 +1,29 @@ +/* + * QFloat Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QFLOAT_H +#define QFLOAT_H + +#include <stdint.h> +#include "qapi/qmp/qobject.h" + +typedef struct QFloat { + QObject_HEAD; + double value; +} QFloat; + +QFloat *qfloat_from_double(double value); +double qfloat_get_double(const QFloat *qi); +QFloat *qobject_to_qfloat(const QObject *obj); + +#endif /* QFLOAT_H */ diff --git a/contrib/qemu/include/qapi/qmp/qint.h b/contrib/qemu/include/qapi/qmp/qint.h new file mode 100644 index 00000000000..48a41b0f2ae --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qint.h @@ -0,0 +1,28 @@ +/* + * QInt Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QINT_H +#define QINT_H + +#include <stdint.h> +#include "qapi/qmp/qobject.h" + +typedef struct QInt { + QObject_HEAD; + int64_t value; +} QInt; + +QInt *qint_from_int(int64_t value); +int64_t qint_get_int(const QInt *qi); +QInt *qobject_to_qint(const QObject *obj); + +#endif /* QINT_H */ diff --git a/contrib/qemu/include/qapi/qmp/qjson.h b/contrib/qemu/include/qapi/qmp/qjson.h new file mode 100644 index 00000000000..73351ed6d68 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qjson.h @@ -0,0 +1,29 @@ +/* + * QObject JSON integration + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QJSON_H +#define QJSON_H + +#include <stdarg.h> +#include "qemu/compiler.h" +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qstring.h" + +QObject *qobject_from_json(const char *string) GCC_FMT_ATTR(1, 0); +QObject *qobject_from_jsonf(const char *string, ...) 
GCC_FMT_ATTR(1, 2); +QObject *qobject_from_jsonv(const char *string, va_list *ap) GCC_FMT_ATTR(1, 0); + +QString *qobject_to_json(const QObject *obj); +QString *qobject_to_json_pretty(const QObject *obj); + +#endif /* QJSON_H */ diff --git a/contrib/qemu/include/qapi/qmp/qlist.h b/contrib/qemu/include/qapi/qmp/qlist.h new file mode 100644 index 00000000000..6cc4831df3e --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qlist.h @@ -0,0 +1,63 @@ +/* + * QList Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QLIST_H +#define QLIST_H + +#include "qapi/qmp/qobject.h" +#include "qemu/queue.h" + +typedef struct QListEntry { + QObject *value; + QTAILQ_ENTRY(QListEntry) next; +} QListEntry; + +typedef struct QList { + QObject_HEAD; + QTAILQ_HEAD(,QListEntry) head; +} QList; + +#define qlist_append(qlist, obj) \ + qlist_append_obj(qlist, QOBJECT(obj)) + +#define QLIST_FOREACH_ENTRY(qlist, var) \ + for ((var) = ((qlist)->head.tqh_first); \ + (var); \ + (var) = ((var)->next.tqe_next)) + +static inline QObject *qlist_entry_obj(const QListEntry *entry) +{ + return entry->value; +} + +QList *qlist_new(void); +QList *qlist_copy(QList *src); +void qlist_append_obj(QList *qlist, QObject *obj); +void qlist_iter(const QList *qlist, + void (*iter)(QObject *obj, void *opaque), void *opaque); +QObject *qlist_pop(QList *qlist); +QObject *qlist_peek(QList *qlist); +int qlist_empty(const QList *qlist); +size_t qlist_size(const QList *qlist); +QList *qobject_to_qlist(const QObject *obj); + +static inline const QListEntry *qlist_first(const QList *qlist) +{ + return QTAILQ_FIRST(&qlist->head); +} + +static inline const QListEntry *qlist_next(const QListEntry *entry) +{ + return QTAILQ_NEXT(entry, next); +} + +#endif /* QLIST_H */ diff --git a/contrib/qemu/include/qapi/qmp/qobject.h b/contrib/qemu/include/qapi/qmp/qobject.h new file mode 100644 index 00000000000..9124649ed20 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qobject.h @@ -0,0 +1,112 @@ +/* + * QEMU Object Model. + * + * Based on ideas by Avi Kivity <avi@redhat.com> + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + * QObject Reference Counts Terminology + * ------------------------------------ + * + * - Returning references: A function that returns an object may + * return it as either a weak or a strong reference. If the reference + * is strong, you are responsible for calling QDECREF() on the reference + * when you are done. + * + * If the reference is weak, the owner of the reference may free it at + * any time in the future. Before storing the reference anywhere, you + * should call QINCREF() to make the reference strong. + * + * - Transferring ownership: when you transfer ownership of a reference + * by calling a function, you are no longer responsible for calling + * QDECREF() when the reference is no longer needed. In other words, + * when the function returns you must behave as if the reference to the + * passed object was weak. 
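+ *
+ * A sketch of the two cases, assuming a getter such as qdict_get()
+ * hands back a weak reference (the names are illustrative):
+ *
+ *   QObject *v = qdict_get(dict, "key");   weak: the owner may free it
+ *   qobject_incref(v);                     now a strong reference we own
+ *   ...
+ *   qobject_decref(v);                     drop it when we are done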
+ */ +#ifndef QOBJECT_H +#define QOBJECT_H + +#include <stddef.h> +#include <assert.h> + +typedef enum { + QTYPE_NONE, + QTYPE_QINT, + QTYPE_QSTRING, + QTYPE_QDICT, + QTYPE_QLIST, + QTYPE_QFLOAT, + QTYPE_QBOOL, + QTYPE_QERROR, +} qtype_code; + +struct QObject; + +typedef struct QType { + qtype_code code; + void (*destroy)(struct QObject *); +} QType; + +typedef struct QObject { + const QType *type; + size_t refcnt; +} QObject; + +/* Objects definitions must include this */ +#define QObject_HEAD \ + QObject base + +/* Get the 'base' part of an object */ +#define QOBJECT(obj) (&(obj)->base) + +/* High-level interface for qobject_incref() */ +#define QINCREF(obj) \ + qobject_incref(QOBJECT(obj)) + +/* High-level interface for qobject_decref() */ +#define QDECREF(obj) \ + qobject_decref(obj ? QOBJECT(obj) : NULL) + +/* Initialize an object to default values */ +#define QOBJECT_INIT(obj, qtype_type) \ + obj->base.refcnt = 1; \ + obj->base.type = qtype_type + +/** + * qobject_incref(): Increment QObject's reference count + */ +static inline void qobject_incref(QObject *obj) +{ + if (obj) + obj->refcnt++; +} + +/** + * qobject_decref(): Decrement QObject's reference count, deallocate + * when it reaches zero + */ +static inline void qobject_decref(QObject *obj) +{ + if (obj && --obj->refcnt == 0) { + assert(obj->type != NULL); + assert(obj->type->destroy != NULL); + obj->type->destroy(obj); + } +} + +/** + * qobject_type(): Return the QObject's type + */ +static inline qtype_code qobject_type(const QObject *obj) +{ + assert(obj->type != NULL); + return obj->type->code; +} + +#endif /* QOBJECT_H */ diff --git a/contrib/qemu/include/qapi/qmp/qstring.h b/contrib/qemu/include/qapi/qmp/qstring.h new file mode 100644 index 00000000000..1bc3666107c --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/qstring.h @@ -0,0 +1,36 @@ +/* + * QString Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QSTRING_H +#define QSTRING_H + +#include <stdint.h> +#include "qapi/qmp/qobject.h" + +typedef struct QString { + QObject_HEAD; + char *string; + size_t length; + size_t capacity; +} QString; + +QString *qstring_new(void); +QString *qstring_from_str(const char *str); +QString *qstring_from_substr(const char *str, int start, int end); +size_t qstring_get_length(const QString *qstring); +const char *qstring_get_str(const QString *qstring); +void qstring_append_int(QString *qstring, int64_t value); +void qstring_append(QString *qstring, const char *str); +void qstring_append_chr(QString *qstring, int c); +QString *qobject_to_qstring(const QObject *obj); + +#endif /* QSTRING_H */ diff --git a/contrib/qemu/include/qapi/qmp/types.h b/contrib/qemu/include/qapi/qmp/types.h new file mode 100644 index 00000000000..7782ec5a602 --- /dev/null +++ b/contrib/qemu/include/qapi/qmp/types.h @@ -0,0 +1,25 @@ +/* + * Include all QEMU objects. + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#ifndef QEMU_OBJECTS_H +#define QEMU_OBJECTS_H + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qint.h" +#include "qapi/qmp/qfloat.h" +#include "qapi/qmp/qbool.h" +#include "qapi/qmp/qstring.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/qjson.h" + +#endif /* QEMU_OBJECTS_H */ diff --git a/contrib/qemu/include/qemu-common.h b/contrib/qemu/include/qemu-common.h new file mode 100644 index 00000000000..6948bb91774 --- /dev/null +++ b/contrib/qemu/include/qemu-common.h @@ -0,0 +1,478 @@ + +/* Common header file that is included by all of QEMU. + * + * This file is supposed to be included only by .c files. No header file should + * depend on qemu-common.h, as this would easily lead to circular header + * dependencies. + * + * If a header file uses a definition from qemu-common.h, that definition + * must be moved to a separate header file, and the header that uses it + * must include that header. + */ +#ifndef QEMU_COMMON_H +#define QEMU_COMMON_H + +#include "qemu/compiler.h" +#include "config-host.h" +#include "qemu/typedefs.h" + +#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__) +#define WORDS_ALIGNED +#endif + +#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) + +/* we put basic includes here to avoid repeating them in device drivers */ +#include <stdlib.h> +#include <stdio.h> +#include <stdarg.h> +#include <stdbool.h> +#include <string.h> +#include <strings.h> +#include <inttypes.h> +#include <limits.h> +#include <time.h> +#include <ctype.h> +#include <errno.h> +#include <unistd.h> +#include <fcntl.h> +#include <sys/stat.h> +#include <sys/time.h> +#include <assert.h> +#include <signal.h> +#include "glib-compat.h" + +#ifdef _WIN32 +#include "sysemu/os-win32.h" +#endif + +#ifdef CONFIG_POSIX +#include "sysemu/os-posix.h" +#endif + +#ifndef O_LARGEFILE +#define O_LARGEFILE 0 +#endif +#ifndef O_BINARY +#define O_BINARY 0 +#endif +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +#ifndef ENOMEDIUM +#define ENOMEDIUM ENODEV +#endif +#if !defined(ENOTSUP) +#define ENOTSUP 4096 +#endif +#if !defined(ECANCELED) +#define ECANCELED 4097 +#endif +#if !defined(EMEDIUMTYPE) +#define EMEDIUMTYPE 4098 +#endif +#ifndef TIME_MAX +#define TIME_MAX LONG_MAX +#endif + +/* HOST_LONG_BITS is the size of a native pointer in bits. */ +#if UINTPTR_MAX == UINT32_MAX +# define HOST_LONG_BITS 32 +#elif UINTPTR_MAX == UINT64_MAX +# define HOST_LONG_BITS 64 +#else +# error Unknown pointer size +#endif + +typedef int (*fprintf_function)(FILE *f, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); + +#ifdef _WIN32 +#define fsync _commit +#if !defined(lseek) +# define lseek _lseeki64 +#endif +int qemu_ftruncate64(int, int64_t); +#if !defined(ftruncate) +# define ftruncate qemu_ftruncate64 +#endif + +static inline char *realpath(const char *path, char *resolved_path) +{ + _fullpath(resolved_path, path, _MAX_PATH); + return resolved_path; +} +#endif + +/* icount */ +void configure_icount(const char *option); +extern int use_icount; + +#include "qemu/osdep.h" +#include "qemu/bswap.h" + +/* FIXME: Remove NEED_CPU_H. 
*/ +#ifdef NEED_CPU_H +#include "cpu.h" +#endif /* !defined(NEED_CPU_H) */ + +/* main function, renamed */ +#if defined(CONFIG_COCOA) +int qemu_main(int argc, char **argv, char **envp); +#endif + +void qemu_get_timedate(struct tm *tm, int offset); +int qemu_timedate_diff(struct tm *tm); + +#if !GLIB_CHECK_VERSION(2, 20, 0) +/* + * Glib before 2.20.0 doesn't implement g_poll, so wrap it to compile properly + * on older systems. + */ +static inline gint g_poll(GPollFD *fds, guint nfds, gint timeout) +{ + GMainContext *ctx = g_main_context_default(); + return g_main_context_get_poll_func(ctx)(fds, nfds, timeout); +} +#endif + +/** + * is_help_option: + * @s: string to test + * + * Check whether @s is one of the standard strings which indicate + * that the user is asking for a list of the valid values for a + * command option like -cpu or -M. The current accepted strings + * are 'help' and '?'. '?' is deprecated (it is a shell wildcard + * which makes it annoying to use in a reliable way) but provided + * for backwards compatibility. + * + * Returns: true if @s is a request for a list. + */ +static inline bool is_help_option(const char *s) +{ + return !strcmp(s, "?") || !strcmp(s, "help"); +} + +/* cutils.c */ +void pstrcpy(char *buf, int buf_size, const char *str); +void strpadcpy(char *buf, int buf_size, const char *str, char pad); +char *pstrcat(char *buf, int buf_size, const char *s); +int strstart(const char *str, const char *val, const char **ptr); +int stristart(const char *str, const char *val, const char **ptr); +int qemu_strnlen(const char *s, int max_len); +char *qemu_strsep(char **input, const char *delim); +time_t mktimegm(struct tm *tm); +int qemu_fls(int i); +int qemu_fdatasync(int fd); +int fcntl_setfl(int fd, int flag); +int qemu_parse_fd(const char *param); + +int parse_uint(const char *s, unsigned long long *value, char **endptr, + int base); +int parse_uint_full(const char *s, unsigned long long *value, int base); + +/* + * strtosz() suffixes used to specify the default treatment of an + * argument passed to strtosz() without an explicit suffix. + * These should be defined using upper case characters in the range + * A-Z, as strtosz() will use qemu_toupper() on the given argument + * prior to comparison. 
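+ *
+ * Illustrative calls (the interpretations follow from the suffix rules
+ * above and from the 1024-based units strtosz uses; not normative):
+ *
+ *   strtosz_suffix("128", NULL, STRTOSZ_DEFSUFFIX_MB)  reads 128 as mebibytes
+ *   strtosz_suffix("1k",  NULL, STRTOSZ_DEFSUFFIX_MB)  explicit suffix wins,
+ *                                                      so this yields 1024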
+ */ +#define STRTOSZ_DEFSUFFIX_EB 'E' +#define STRTOSZ_DEFSUFFIX_PB 'P' +#define STRTOSZ_DEFSUFFIX_TB 'T' +#define STRTOSZ_DEFSUFFIX_GB 'G' +#define STRTOSZ_DEFSUFFIX_MB 'M' +#define STRTOSZ_DEFSUFFIX_KB 'K' +#define STRTOSZ_DEFSUFFIX_B 'B' +int64_t strtosz(const char *nptr, char **end); +int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix); +int64_t strtosz_suffix_unit(const char *nptr, char **end, + const char default_suffix, int64_t unit); + +/* path.c */ +void init_paths(const char *prefix); +const char *path(const char *pathname); + +#define qemu_isalnum(c) isalnum((unsigned char)(c)) +#define qemu_isalpha(c) isalpha((unsigned char)(c)) +#define qemu_iscntrl(c) iscntrl((unsigned char)(c)) +#define qemu_isdigit(c) isdigit((unsigned char)(c)) +#define qemu_isgraph(c) isgraph((unsigned char)(c)) +#define qemu_islower(c) islower((unsigned char)(c)) +#define qemu_isprint(c) isprint((unsigned char)(c)) +#define qemu_ispunct(c) ispunct((unsigned char)(c)) +#define qemu_isspace(c) isspace((unsigned char)(c)) +#define qemu_isupper(c) isupper((unsigned char)(c)) +#define qemu_isxdigit(c) isxdigit((unsigned char)(c)) +#define qemu_tolower(c) tolower((unsigned char)(c)) +#define qemu_toupper(c) toupper((unsigned char)(c)) +#define qemu_isascii(c) isascii((unsigned char)(c)) +#define qemu_toascii(c) toascii((unsigned char)(c)) + +void *qemu_oom_check(void *ptr); + +ssize_t qemu_write_full(int fd, const void *buf, size_t count) + QEMU_WARN_UNUSED_RESULT; +ssize_t qemu_send_full(int fd, const void *buf, size_t count, int flags) + QEMU_WARN_UNUSED_RESULT; +ssize_t qemu_recv_full(int fd, void *buf, size_t count, int flags) + QEMU_WARN_UNUSED_RESULT; + +#ifndef _WIN32 +int qemu_pipe(int pipefd[2]); +/* like openpty() but also makes it raw; return master fd */ +int qemu_openpty_raw(int *aslave, char *pty_name); +#endif + +#ifdef _WIN32 +/* MinGW needs type casts for the 'buf' and 'optval' arguments. */ +#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ + getsockopt(sockfd, level, optname, (void *)optval, optlen) +#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ + setsockopt(sockfd, level, optname, (const void *)optval, optlen) +#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags) +#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ + sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen) +#else +#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ + getsockopt(sockfd, level, optname, optval, optlen) +#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ + setsockopt(sockfd, level, optname, optval, optlen) +#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags) +#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ + sendto(sockfd, buf, len, flags, destaddr, addrlen) +#endif + +/* Error handling. */ + +void QEMU_NORETURN hw_error(const char *fmt, ...) 
GCC_FMT_ATTR(1, 2); + +struct ParallelIOArg { + void *buffer; + int count; +}; + +typedef int (*DMA_transfer_handler) (void *opaque, int nchan, int pos, int size); + +typedef uint64_t pcibus_t; + +typedef enum LostTickPolicy { + LOST_TICK_DISCARD, + LOST_TICK_DELAY, + LOST_TICK_MERGE, + LOST_TICK_SLEW, + LOST_TICK_MAX +} LostTickPolicy; + +typedef struct PCIHostDeviceAddress { + unsigned int domain; + unsigned int bus; + unsigned int slot; + unsigned int function; +} PCIHostDeviceAddress; + +void tcg_exec_init(unsigned long tb_size); +bool tcg_enabled(void); + +void cpu_exec_init_all(void); + +/* CPU save/load. */ +#ifdef CPU_SAVE_VERSION +void cpu_save(QEMUFile *f, void *opaque); +int cpu_load(QEMUFile *f, void *opaque, int version_id); +#endif + +/* Unblock cpu */ +void qemu_cpu_kick_self(void); + +/* work queue */ +struct qemu_work_item { + struct qemu_work_item *next; + void (*func)(void *data); + void *data; + int done; + bool free; +}; + + +/** + * Sends a (part of) iovec down a socket, yielding when the socket is full, or + * Receives data into a (part of) iovec from a socket, + * yielding when there is no data in the socket. + * The same interface as qemu_sendv_recvv(), with added yielding. + * XXX should mark these as coroutine_fn + */ +ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt, + size_t offset, size_t bytes, bool do_send); +#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \ + qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false) +#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \ + qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true) + +/** + * The same as above, but with just a single buffer + */ +ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send); +#define qemu_co_recv(sockfd, buf, bytes) \ + qemu_co_send_recv(sockfd, buf, bytes, false) +#define qemu_co_send(sockfd, buf, bytes) \ + qemu_co_send_recv(sockfd, buf, bytes, true) + +typedef struct QEMUIOVector { + struct iovec *iov; + int niov; + int nalloc; + size_t size; +} QEMUIOVector; + +void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); +void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); +void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); +void qemu_iovec_concat(QEMUIOVector *dst, + QEMUIOVector *src, size_t soffset, size_t sbytes); +void qemu_iovec_concat_iov(QEMUIOVector *dst, + struct iovec *src_iov, unsigned int src_cnt, + size_t soffset, size_t sbytes); +void qemu_iovec_destroy(QEMUIOVector *qiov); +void qemu_iovec_reset(QEMUIOVector *qiov); +size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset, + void *buf, size_t bytes); +size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset, + const void *buf, size_t bytes); +size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset, + int fillc, size_t bytes); + +bool buffer_is_zero(const void *buf, size_t len); + +void qemu_progress_init(int enabled, float min_skip); +void qemu_progress_end(void); +void qemu_progress_print(float delta, int max); +const char *qemu_get_vm_name(void); + +#define QEMU_FILE_TYPE_BIOS 0 +#define QEMU_FILE_TYPE_KEYMAP 1 +char *qemu_find_file(int type, const char *name); + +/* OS specific functions */ +void os_setup_early_signal_handling(void); +char *os_find_datadir(const char *argv0); +void os_parse_cmd_args(int index, const char *optarg); +void os_pidfile_error(void); + +/* Convert a byte between binary and BCD. 
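+ * For example, to_bcd(42) yields 0x42 and from_bcd(0x42) yields 42;
+ * only values in the range 0..99 are meaningful here.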
*/ +static inline uint8_t to_bcd(uint8_t val) +{ + return ((val / 10) << 4) | (val % 10); +} + +static inline uint8_t from_bcd(uint8_t val) +{ + return ((val >> 4) * 10) + (val & 0x0f); +} + +/* compute with 96 bit intermediate result: (a*b)/c */ +static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) +{ + union { + uint64_t ll; + struct { +#ifdef HOST_WORDS_BIGENDIAN + uint32_t high, low; +#else + uint32_t low, high; +#endif + } l; + } u, res; + uint64_t rl, rh; + + u.ll = a; + rl = (uint64_t)u.l.low * (uint64_t)b; + rh = (uint64_t)u.l.high * (uint64_t)b; + rh += (rl >> 32); + res.l.high = rh / c; + res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c; + return res.ll; +} + +/* Round number down to multiple */ +#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m)) + +/* Round number up to multiple */ +#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m)) + +static inline bool is_power_of_2(uint64_t value) +{ + if (!value) { + return 0; + } + + return !(value & (value - 1)); +} + +/* round down to the nearest power of 2*/ +int64_t pow2floor(int64_t value); + +#include "qemu/module.h" + +/* + * Implementation of ULEB128 (http://en.wikipedia.org/wiki/LEB128) + * Input is limited to 14-bit numbers + */ + +int uleb128_encode_small(uint8_t *out, uint32_t n); +int uleb128_decode_small(const uint8_t *in, uint32_t *n); + +/* unicode.c */ +int mod_utf8_codepoint(const char *s, size_t n, char **end); + +/* + * Hexdump a buffer to a file. An optional string prefix is added to every line + */ + +void qemu_hexdump(const char *buf, FILE *fp, const char *prefix, size_t size); + +/* vector definitions */ +#ifdef __ALTIVEC__ +#include <altivec.h> +/* The altivec.h header says we're allowed to undef these for + * C++ compatibility. Here we don't care about C++, but we + * undef them anyway to avoid namespace pollution. + */ +#undef vector +#undef pixel +#undef bool +#define VECTYPE __vector unsigned char +#define SPLAT(p) vec_splat(vec_ld(0, p), 0) +#define ALL_EQ(v1, v2) vec_all_eq(v1, v2) +/* altivec.h may redefine the bool macro as vector type. + * Reset it to POSIX semantics. 
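The arithmetic helpers above are easiest to read with concrete numbers. A small sketch (hypothetical function name; it relies only on the definitions just shown and the integer types the header already assumes):

static void arith_helpers_sketch(void)
{
    uint8_t bcd = to_bcd(42);       /* 0x42: tens digit in the high nibble */
    uint8_t bin = from_bcd(bcd);    /* back to 42 */

    /* (a * b) / c with a 96-bit intermediate: one day of nanoseconds
     * converted to 1.193182 MHz PIT ticks.  A plain a * b / c would
     * overflow a 64-bit intermediate here. */
    uint64_t ticks = muldiv64(86400000000000ULL, 1193182, 1000000000);
    /* ticks == 103090924800 */

    uint64_t up   = QEMU_ALIGN_UP(4097, 4096);    /* 8192 */
    uint64_t down = QEMU_ALIGN_DOWN(4097, 4096);  /* 4096 */
    bool pow2     = is_power_of_2(4096);          /* true */

    (void)bin; (void)ticks; (void)up; (void)down; (void)pow2;
}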
*/ +#define bool _Bool +#elif defined __SSE2__ +#include <emmintrin.h> +#define VECTYPE __m128i +#define SPLAT(p) _mm_set1_epi8(*(p)) +#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF) +#else +#define VECTYPE unsigned long +#define SPLAT(p) (*(p) * (~0UL / 255)) +#define ALL_EQ(v1, v2) ((v1) == (v2)) +#endif + +#define BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR 8 +static inline bool +can_use_buffer_find_nonzero_offset(const void *buf, size_t len) +{ + return (len % (BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR + * sizeof(VECTYPE)) == 0 + && ((uintptr_t) buf) % sizeof(VECTYPE) == 0); +} +size_t buffer_find_nonzero_offset(const void *buf, size_t len); + +/* + * helper to parse debug environment variables + */ +int parse_debug_env(const char *name, int max, int initial); + +#endif diff --git a/contrib/qemu/include/qemu/aes.h b/contrib/qemu/include/qemu/aes.h new file mode 100644 index 00000000000..e79c707436f --- /dev/null +++ b/contrib/qemu/include/qemu/aes.h @@ -0,0 +1,45 @@ +#ifndef QEMU_AES_H +#define QEMU_AES_H + +#define AES_MAXNR 14 +#define AES_BLOCK_SIZE 16 + +struct aes_key_st { + uint32_t rd_key[4 *(AES_MAXNR + 1)]; + int rounds; +}; +typedef struct aes_key_st AES_KEY; + +int AES_set_encrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key); +int AES_set_decrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key); + +void AES_encrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key); +void AES_decrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key); +void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, + const unsigned long length, const AES_KEY *key, + unsigned char *ivec, const int enc); + +/* +AES_Te0[x] = S [x].[02, 01, 01, 03]; +AES_Te1[x] = S [x].[03, 02, 01, 01]; +AES_Te2[x] = S [x].[01, 03, 02, 01]; +AES_Te3[x] = S [x].[01, 01, 03, 02]; +AES_Te4[x] = S [x].[01, 01, 01, 01]; + +AES_Td0[x] = Si[x].[0e, 09, 0d, 0b]; +AES_Td1[x] = Si[x].[0b, 0e, 09, 0d]; +AES_Td2[x] = Si[x].[0d, 0b, 0e, 09]; +AES_Td3[x] = Si[x].[09, 0d, 0b, 0e]; +AES_Td4[x] = Si[x].[01, 01, 01, 01]; +*/ + +extern const uint32_t AES_Te0[256], AES_Te1[256], AES_Te2[256], + AES_Te3[256], AES_Te4[256]; +extern const uint32_t AES_Td0[256], AES_Td1[256], AES_Td2[256], + AES_Td3[256], AES_Td4[256]; + +#endif diff --git a/contrib/qemu/include/qemu/atomic.h b/contrib/qemu/include/qemu/atomic.h new file mode 100644 index 00000000000..0aa89133018 --- /dev/null +++ b/contrib/qemu/include/qemu/atomic.h @@ -0,0 +1,202 @@ +/* + * Simple interface for atomic operations. + * + * Copyright (C) 2013 Red Hat, Inc. + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef __QEMU_ATOMIC_H +#define __QEMU_ATOMIC_H 1 + +#include "qemu/compiler.h" + +/* For C11 atomic ops */ + +/* Compiler barrier */ +#define barrier() ({ asm volatile("" ::: "memory"); (void)0; }) + +#ifndef __ATOMIC_RELAXED + +/* + * We use GCC builtin if it's available, as that can use mfence on + * 32-bit as well, e.g. if built with -march=pentium-m. However, on + * i386 the spec is buggy, and the implementation followed it until + * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793). 
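A minimal sketch of driving the AES declarations above for a single block (dummy key and data, invented function name; it assumes the in-tree implementation behind these prototypes):

static void aes_block_sketch(void)
{
    static const unsigned char key_bytes[16] = { 0x01, 0x23, 0x45, 0x67 }; /* rest zero */
    unsigned char in[AES_BLOCK_SIZE] = "plaintext block";   /* exactly 16 bytes */
    unsigned char enc[AES_BLOCK_SIZE], dec[AES_BLOCK_SIZE];
    AES_KEY enc_key, dec_key;

    AES_set_encrypt_key(key_bytes, 128, &enc_key);
    AES_set_decrypt_key(key_bytes, 128, &dec_key);

    AES_encrypt(in, enc, &enc_key);
    AES_decrypt(enc, dec, &dec_key);    /* dec now matches in */
}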
+ */ +#if defined(__i386__) || defined(__x86_64__) +#if !QEMU_GNUC_PREREQ(4, 4) +#if defined __x86_64__ +#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; }) +#else +#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; }) +#endif +#endif +#endif + + +#ifdef __alpha__ +#define smp_read_barrier_depends() asm volatile("mb":::"memory") +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) + +/* + * Because of the strongly ordered storage model, wmb() and rmb() are nops + * here (a compiler barrier only). QEMU doesn't do accesses to write-combining + * qemu memory or non-temporal load/stores from C code. + */ +#define smp_wmb() barrier() +#define smp_rmb() barrier() + +/* + * __sync_lock_test_and_set() is documented to be an acquire barrier only, + * but it is a full barrier at the hardware level. Add a compiler barrier + * to make it a full barrier also at the compiler level. + */ +#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) + +/* + * Load/store with Java volatile semantics. + */ +#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) + +#elif defined(_ARCH_PPC) + +/* + * We use an eieio() for wmb() on powerpc. This assumes we don't + * need to order cacheable and non-cacheable stores with respect to + * each other. + * + * smp_mb has the same problem as on x86 for not-very-new GCC + * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011). + */ +#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; }) +#if defined(__powerpc64__) +#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; }) +#else +#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; }) +#endif +#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; }) + +#endif /* _ARCH_PPC */ + +#endif /* C11 atomics */ + +/* + * For (host) platforms we don't have explicit barrier definitions + * for, we use the gcc __sync_synchronize() primitive to generate a + * full barrier. This should be safe on all platforms, though it may + * be overkill for smp_wmb() and smp_rmb(). + */ +#ifndef smp_mb +#define smp_mb() __sync_synchronize() +#endif + +#ifndef smp_wmb +#ifdef __ATOMIC_RELEASE +#define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE) +#else +#define smp_wmb() __sync_synchronize() +#endif +#endif + +#ifndef smp_rmb +#ifdef __ATOMIC_ACQUIRE +#define smp_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE) +#else +#define smp_rmb() __sync_synchronize() +#endif +#endif + +#ifndef smp_read_barrier_depends +#ifdef __ATOMIC_CONSUME +#define smp_read_barrier_depends() __atomic_thread_fence(__ATOMIC_CONSUME) +#else +#define smp_read_barrier_depends() barrier() +#endif +#endif + +#ifndef atomic_read +#define atomic_read(ptr) (*(__typeof__(*ptr) *volatile) (ptr)) +#endif + +#ifndef atomic_set +#define atomic_set(ptr, i) ((*(__typeof__(*ptr) *volatile) (ptr)) = (i)) +#endif + +/* These have the same semantics as Java volatile variables. + * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html: + * "1. Issue a StoreStore barrier (wmb) before each volatile store." + * 2. Issue a StoreLoad barrier after each volatile store. + * Note that you could instead issue one before each volatile load, but + * this would be slower for typical programs using volatiles in which + * reads greatly outnumber writes. Alternatively, if available, you + * can implement volatile store as an atomic instruction (for example + * XCHG on x86) and omit the barrier. 
This may be more efficient if + * atomic instructions are cheaper than StoreLoad barriers. + * 3. Issue LoadLoad and LoadStore barriers after each volatile load." + * + * If you prefer to think in terms of "pairing" of memory barriers, + * an atomic_mb_read pairs with an atomic_mb_set. + * + * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq, + * while an atomic_mb_set is a st.rel followed by a memory barrier. + * + * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST + * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough. + * Just always use the barriers manually by the rules above. + */ +#ifndef atomic_mb_read +#define atomic_mb_read(ptr) ({ \ + typeof(*ptr) _val = atomic_read(ptr); \ + smp_rmb(); \ + _val; \ +}) +#endif + +#ifndef atomic_mb_set +#define atomic_mb_set(ptr, i) do { \ + smp_wmb(); \ + atomic_set(ptr, i); \ + smp_mb(); \ +} while (0) +#endif + +#ifndef atomic_xchg +#ifdef __ATOMIC_SEQ_CST +#define atomic_xchg(ptr, i) ({ \ + typeof(*ptr) _new = (i), _old; \ + __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \ + _old; \ +}) +#elif defined __clang__ +#define atomic_xchg(ptr, i) __sync_exchange(ptr, i) +#else +/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */ +#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) +#endif +#endif + +/* Provide shorter names for GCC atomic builtins. */ +#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) +#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) +#define atomic_fetch_add __sync_fetch_and_add +#define atomic_fetch_sub __sync_fetch_and_sub +#define atomic_fetch_and __sync_fetch_and_and +#define atomic_fetch_or __sync_fetch_and_or +#define atomic_cmpxchg __sync_val_compare_and_swap + +/* And even shorter names that return void. */ +#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) +#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1)) +#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n)) +#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) +#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) +#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n)) + +#endif diff --git a/contrib/qemu/include/qemu/bitmap.h b/contrib/qemu/include/qemu/bitmap.h new file mode 100644 index 00000000000..308bbb71e9b --- /dev/null +++ b/contrib/qemu/include/qemu/bitmap.h @@ -0,0 +1,222 @@ +/* + * Bitmap Module + * + * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com> + * + * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef BITMAP_H +#define BITMAP_H + +#include "qemu-common.h" +#include "qemu/bitops.h" + +/* + * The available bitmap operations and their rough meaning in the + * case that the bitmap is a single unsigned long are thus: + * + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. 
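As a rough illustration of how the macros above pair up, a hand-written sketch of a one-way flag handoff between two threads (the names are invented and not part of this patch):

static int payload_value;
static int payload_ready;

static void publisher(void)
{
    payload_value = 42;                /* plain store                  */
    atomic_mb_set(&payload_ready, 1);  /* smp_wmb + store + smp_mb     */
}

static int consumer(void)
{
    if (atomic_mb_read(&payload_ready)) {  /* load + smp_rmb           */
        return payload_value;              /* guaranteed to observe 42 */
    }
    return -1;
}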
+ * + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? + * bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + */ + +/* + * Also the following operations apply to bitmaps. + * + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? + * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + */ + +#define BITMAP_LAST_WORD_MASK(nbits) \ + ( \ + ((nbits) % BITS_PER_LONG) ? \ + (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ + ) + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +#define small_nbits(nbits) \ + ((nbits) <= BITS_PER_LONG) + +int slow_bitmap_empty(const unsigned long *bitmap, int bits); +int slow_bitmap_full(const unsigned long *bitmap, int bits); +int slow_bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +void slow_bitmap_complement(unsigned long *dst, const unsigned long *src, + int bits); +void slow_bitmap_shift_right(unsigned long *dst, + const unsigned long *src, int shift, int bits); +void slow_bitmap_shift_left(unsigned long *dst, + const unsigned long *src, int shift, int bits); +int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +int slow_bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); + +static inline unsigned long *bitmap_new(int nbits) +{ + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + return g_malloc0(len); +} + +static inline void bitmap_zero(unsigned long *dst, int nbits) +{ + if (small_nbits(nbits)) { + *dst = 0UL; + } else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} + +static inline void bitmap_fill(unsigned long *dst, int nbits) +{ + size_t nlongs = BITS_TO_LONGS(nbits); + if (!small_nbits(nbits)) { + int len = (nlongs - 1) * sizeof(unsigned long); + memset(dst, 0xff, len); + } + dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); +} + +static inline 
void bitmap_copy(unsigned long *dst, const unsigned long *src, + int nbits) +{ + if (small_nbits(nbits)) { + *dst = *src; + } else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memcpy(dst, src, len); + } +} + +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_nbits(nbits)) { + return (*dst = *src1 & *src2) != 0; + } + return slow_bitmap_and(dst, src1, src2, nbits); +} + +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_nbits(nbits)) { + *dst = *src1 | *src2; + } else { + slow_bitmap_or(dst, src1, src2, nbits); + } +} + +static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_nbits(nbits)) { + *dst = *src1 ^ *src2; + } else { + slow_bitmap_xor(dst, src1, src2, nbits); + } +} + +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_nbits(nbits)) { + return (*dst = *src1 & ~(*src2)) != 0; + } + return slow_bitmap_andnot(dst, src1, src2, nbits); +} + +static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, + int nbits) +{ + if (small_nbits(nbits)) { + *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); + } else { + slow_bitmap_complement(dst, src, nbits); + } +} + +static inline int bitmap_equal(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_nbits(nbits)) { + return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); + } else { + return slow_bitmap_equal(src1, src2, nbits); + } +} + +static inline int bitmap_empty(const unsigned long *src, int nbits) +{ + if (small_nbits(nbits)) { + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + } else { + return slow_bitmap_empty(src, nbits); + } +} + +static inline int bitmap_full(const unsigned long *src, int nbits) +{ + if (small_nbits(nbits)) { + return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + } else { + return slow_bitmap_full(src, nbits); + } +} + +static inline int bitmap_intersects(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_nbits(nbits)) { + return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + } else { + return slow_bitmap_intersects(src1, src2, nbits); + } +} + +void bitmap_set(unsigned long *map, int i, int len); +void bitmap_clear(unsigned long *map, int start, int nr); +unsigned long bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask); + +#endif /* BITMAP_H */ diff --git a/contrib/qemu/include/qemu/bitops.h b/contrib/qemu/include/qemu/bitops.h new file mode 100644 index 00000000000..affcc969dcf --- /dev/null +++ b/contrib/qemu/include/qemu/bitops.h @@ -0,0 +1,276 @@ +/* + * Bitops Module + * + * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com> + * + * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
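A short usage sketch of the bitmap API above, here as a small page allocation map (hypothetical function and values; DECLARE_BITMAP and the helpers all come from this header):

static void bitmap_usage_sketch(void)
{
    DECLARE_BITMAP(pages, 64);

    bitmap_zero(pages, 64);
    bitmap_set(pages, 0, 8);     /* pages 0..7 in use   */
    bitmap_set(pages, 12, 4);    /* pages 12..15 in use */

    /* Find 8 consecutive clear bits with the start aligned to 4 (mask 3).
     * Bits 8..11 are clear but too short, so this returns 16. */
    unsigned long start = bitmap_find_next_zero_area(pages, 64, 0, 8, 3);
    bitmap_set(pages, start, 8);
}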
+ */ + +#ifndef BITOPS_H +#define BITOPS_H + +#include "qemu-common.h" +#include "host-utils.h" + +#define BITS_PER_BYTE CHAR_BIT +#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE) + +#define BIT(nr) (1UL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) + +/** + * set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + */ +static inline void set_bit(int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + + *p |= mask; +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + */ +static inline void clear_bit(int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + + *p &= ~mask; +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + */ +static inline void change_bit(int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + + *p ^= mask; +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + */ +static inline int test_and_set_bit(int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + */ +static inline int test_and_clear_bit(int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + */ +static inline int test_and_change_bit(int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit, or size. 
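The raw bit operations above work on arrays of unsigned long; a brief sketch (hypothetical function, illustrative bit numbers):

static void bitops_sketch(void)
{
    unsigned long flags[BITS_TO_LONGS(128)] = { 0 };

    set_bit(3, flags);
    set_bit(70, flags);                          /* lands in the second word */

    if (test_bit(70, flags)) {
        /* ... react to the flag ... */
    }

    int was_set = test_and_clear_bit(3, flags);  /* returns 1 and clears bit 3 */
    (void)was_set;
}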
+ */ +unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +unsigned long find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ + +unsigned long find_next_zero_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset); + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. + */ +static inline unsigned long find_first_bit(const unsigned long *addr, + unsigned long size) +{ + return find_next_bit(addr, size, 0); +} + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +static inline unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size) +{ + return find_next_zero_bit(addr, size, 0); +} + +static inline unsigned long hweight_long(unsigned long w) +{ + unsigned long count; + + for (count = 0; w; w >>= 1) { + count += w & 1; + } + return count; +} + +/** + * extract32: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 32 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 32 bit word. It is valid to request that + * all 32 bits are returned (ie @length 32 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint32_t extract32(uint32_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 32 - start); + return (value >> start) & (~0U >> (32 - length)); +} + +/** + * extract64: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 64 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 64 bit word. It is valid to request that + * all 64 bits are returned (ie @length 64 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint64_t extract64(uint64_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 64 - start); + return (value >> start) & (~0ULL >> (64 - length)); +} + +/** + * deposit32: + * @value: initial value to insert bit field into + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * @fieldval: the value to insert into the bit field + * + * Deposit @fieldval into the 32 bit @value at the bit field specified + * by the @start and @length parameters, and return the modified + * @value. Bits of @value outside the bit field are not modified. 
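For example, extract32() can pull fixed-width fields out of a register value (the layout below is invented, the function name is a placeholder):

static void extract_sketch(void)
{
    uint32_t reg = 0xABCD1234;

    uint32_t low_nibble = extract32(reg, 0, 4);    /* 0x4    */
    uint32_t byte1      = extract32(reg, 8, 8);    /* 0x12   */
    uint32_t top_half   = extract32(reg, 16, 16);  /* 0xABCD */

    (void)low_nibble; (void)byte1; (void)top_half;
}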
+ * Bits of @fieldval above the least significant @length bits are + * ignored. The bit field must lie entirely within the 32 bit word. + * It is valid to request that all 32 bits are modified (ie @length + * 32 and @start 0). + * + * Returns: the modified @value. + */ +static inline uint32_t deposit32(uint32_t value, int start, int length, + uint32_t fieldval) +{ + uint32_t mask; + assert(start >= 0 && length > 0 && length <= 32 - start); + mask = (~0U >> (32 - length)) << start; + return (value & ~mask) | ((fieldval << start) & mask); +} + +/** + * deposit64: + * @value: initial value to insert bit field into + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * @fieldval: the value to insert into the bit field + * + * Deposit @fieldval into the 64 bit @value at the bit field specified + * by the @start and @length parameters, and return the modified + * @value. Bits of @value outside the bit field are not modified. + * Bits of @fieldval above the least significant @length bits are + * ignored. The bit field must lie entirely within the 64 bit word. + * It is valid to request that all 64 bits are modified (ie @length + * 64 and @start 0). + * + * Returns: the modified @value. + */ +static inline uint64_t deposit64(uint64_t value, int start, int length, + uint64_t fieldval) +{ + uint64_t mask; + assert(start >= 0 && length > 0 && length <= 64 - start); + mask = (~0ULL >> (64 - length)) << start; + return (value & ~mask) | ((fieldval << start) & mask); +} + +#endif diff --git a/contrib/qemu/include/qemu/bswap.h b/contrib/qemu/include/qemu/bswap.h new file mode 100644 index 00000000000..14a5f657ce5 --- /dev/null +++ b/contrib/qemu/include/qemu/bswap.h @@ -0,0 +1,478 @@ +#ifndef BSWAP_H +#define BSWAP_H + +#include "config-host.h" +#include <inttypes.h> +#include <limits.h> +#include <string.h> +#include "fpu/softfloat.h" + +#ifdef CONFIG_MACHINE_BSWAP_H +# include <sys/endian.h> +# include <sys/types.h> +# include <machine/bswap.h> +#elif defined(CONFIG_BYTESWAP_H) +# include <byteswap.h> + +static inline uint16_t bswap16(uint16_t x) +{ + return bswap_16(x); +} + +static inline uint32_t bswap32(uint32_t x) +{ + return bswap_32(x); +} + +static inline uint64_t bswap64(uint64_t x) +{ + return bswap_64(x); +} +# else +static inline uint16_t bswap16(uint16_t x) +{ + return (((x & 0x00ff) << 8) | + ((x & 0xff00) >> 8)); +} + +static inline uint32_t bswap32(uint32_t x) +{ + return (((x & 0x000000ffU) << 24) | + ((x & 0x0000ff00U) << 8) | + ((x & 0x00ff0000U) >> 8) | + ((x & 0xff000000U) >> 24)); +} + +static inline uint64_t bswap64(uint64_t x) +{ + return (((x & 0x00000000000000ffULL) << 56) | + ((x & 0x000000000000ff00ULL) << 40) | + ((x & 0x0000000000ff0000ULL) << 24) | + ((x & 0x00000000ff000000ULL) << 8) | + ((x & 0x000000ff00000000ULL) >> 8) | + ((x & 0x0000ff0000000000ULL) >> 24) | + ((x & 0x00ff000000000000ULL) >> 40) | + ((x & 0xff00000000000000ULL) >> 56)); +} +#endif /* ! 
CONFIG_MACHINE_BSWAP_H */ + +static inline void bswap16s(uint16_t *s) +{ + *s = bswap16(*s); +} + +static inline void bswap32s(uint32_t *s) +{ + *s = bswap32(*s); +} + +static inline void bswap64s(uint64_t *s) +{ + *s = bswap64(*s); +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define be_bswap(v, size) (v) +#define le_bswap(v, size) glue(bswap, size)(v) +#define be_bswaps(v, size) +#define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) +#else +#define le_bswap(v, size) (v) +#define be_bswap(v, size) glue(bswap, size)(v) +#define le_bswaps(v, size) +#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) +#endif + +#define CPU_CONVERT(endian, size, type)\ +static inline type endian ## size ## _to_cpu(type v)\ +{\ + return glue(endian, _bswap)(v, size);\ +}\ +\ +static inline type cpu_to_ ## endian ## size(type v)\ +{\ + return glue(endian, _bswap)(v, size);\ +}\ +\ +static inline void endian ## size ## _to_cpus(type *p)\ +{\ + glue(endian, _bswaps)(p, size);\ +}\ +\ +static inline void cpu_to_ ## endian ## size ## s(type *p)\ +{\ + glue(endian, _bswaps)(p, size);\ +}\ +\ +static inline type endian ## size ## _to_cpup(const type *p)\ +{\ + return glue(glue(endian, size), _to_cpu)(*p);\ +}\ +\ +static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\ +{\ + *p = glue(glue(cpu_to_, endian), size)(v);\ +} + +CPU_CONVERT(be, 16, uint16_t) +CPU_CONVERT(be, 32, uint32_t) +CPU_CONVERT(be, 64, uint64_t) + +CPU_CONVERT(le, 16, uint16_t) +CPU_CONVERT(le, 32, uint32_t) +CPU_CONVERT(le, 64, uint64_t) + +/* len must be one of 1, 2, 4 */ +static inline uint32_t qemu_bswap_len(uint32_t value, int len) +{ + return bswap32(value) >> (32 - 8 * len); +} + +/* Unions for reinterpreting between floats and integers. */ + +typedef union { + float32 f; + uint32_t l; +} CPU_FloatU; + +typedef union { + float64 d; +#if defined(HOST_WORDS_BIGENDIAN) + struct { + uint32_t upper; + uint32_t lower; + } l; +#else + struct { + uint32_t lower; + uint32_t upper; + } l; +#endif + uint64_t ll; +} CPU_DoubleU; + +typedef union { + floatx80 d; + struct { + uint64_t lower; + uint16_t upper; + } l; +} CPU_LDoubleU; + +typedef union { + float128 q; +#if defined(HOST_WORDS_BIGENDIAN) + struct { + uint32_t upmost; + uint32_t upper; + uint32_t lower; + uint32_t lowest; + } l; + struct { + uint64_t upper; + uint64_t lower; + } ll; +#else + struct { + uint32_t lowest; + uint32_t lower; + uint32_t upper; + uint32_t upmost; + } l; + struct { + uint64_t lower; + uint64_t upper; + } ll; +#endif +} CPU_QuadU; + +/* unaligned/endian-independent pointer access */ + +/* + * the generic syntax is: + * + * load: ld{type}{sign}{size}{endian}_p(ptr) + * + * store: st{type}{size}{endian}_p(ptr, val) + * + * Note there are small differences with the softmmu access API! + * + * type is: + * (empty): integer access + * f : float access + * + * sign is: + * (empty): for floats or 32 bit size + * u : unsigned + * s : signed + * + * size is: + * b: 8 bits + * w: 16 bits + * l: 32 bits + * q: 64 bits + * + * endian is: + * (empty): host endian + * be : big endian + * le : little endian + */ + +static inline int ldub_p(const void *ptr) +{ + return *(uint8_t *)ptr; +} + +static inline int ldsb_p(const void *ptr) +{ + return *(int8_t *)ptr; +} + +static inline void stb_p(void *ptr, int v) +{ + *(uint8_t *)ptr = v; +} + +/* Any compiler worth its salt will turn these memcpy into native unaligned + operations. Thus we don't need to play games with packed attributes, or + inline byte-by-byte stores. 
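The CPU_CONVERT() expansions above give the usual cpu_to_{be,le}NN()/{be,le}NN_to_cpu() family. A tiny round-trip sketch (hypothetical function):

static void endian_sketch(void)
{
    uint32_t host_val = 0x12345678;

    uint32_t wire_be = cpu_to_be32(host_val);  /* big-endian representation */
    uint32_t back    = be32_to_cpu(wire_be);   /* 0x12345678 again          */
    (void)back;

    /* Following the naming scheme documented above, ldl_le_p()/stl_le_p()
     * combine the same conversion with an unaligned pointer access. */
}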
*/ + +static inline int lduw_p(const void *ptr) +{ + uint16_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline int ldsw_p(const void *ptr) +{ + int16_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stw_p(void *ptr, uint16_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline int ldl_p(const void *ptr) +{ + int32_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stl_p(void *ptr, uint32_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline uint64_t ldq_p(const void *ptr) +{ + uint64_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stq_p(void *ptr, uint64_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline int lduw_le_p(const void *ptr) +{ + return (uint16_t)le_bswap(lduw_p(ptr), 16); +} + +static inline int ldsw_le_p(const void *ptr) +{ + return (int16_t)le_bswap(lduw_p(ptr), 16); +} + +static inline int ldl_le_p(const void *ptr) +{ + return le_bswap(ldl_p(ptr), 32); +} + +static inline uint64_t ldq_le_p(const void *ptr) +{ + return le_bswap(ldq_p(ptr), 64); +} + +static inline void stw_le_p(void *ptr, int v) +{ + stw_p(ptr, le_bswap(v, 16)); +} + +static inline void stl_le_p(void *ptr, int v) +{ + stl_p(ptr, le_bswap(v, 32)); +} + +static inline void stq_le_p(void *ptr, uint64_t v) +{ + stq_p(ptr, le_bswap(v, 64)); +} + +/* float access */ + +static inline float32 ldfl_le_p(const void *ptr) +{ + CPU_FloatU u; + u.l = ldl_le_p(ptr); + return u.f; +} + +static inline void stfl_le_p(void *ptr, float32 v) +{ + CPU_FloatU u; + u.f = v; + stl_le_p(ptr, u.l); +} + +static inline float64 ldfq_le_p(const void *ptr) +{ + CPU_DoubleU u; + u.ll = ldq_le_p(ptr); + return u.d; +} + +static inline void stfq_le_p(void *ptr, float64 v) +{ + CPU_DoubleU u; + u.d = v; + stq_le_p(ptr, u.ll); +} + +static inline int lduw_be_p(const void *ptr) +{ + return (uint16_t)be_bswap(lduw_p(ptr), 16); +} + +static inline int ldsw_be_p(const void *ptr) +{ + return (int16_t)be_bswap(lduw_p(ptr), 16); +} + +static inline int ldl_be_p(const void *ptr) +{ + return be_bswap(ldl_p(ptr), 32); +} + +static inline uint64_t ldq_be_p(const void *ptr) +{ + return be_bswap(ldq_p(ptr), 64); +} + +static inline void stw_be_p(void *ptr, int v) +{ + stw_p(ptr, be_bswap(v, 16)); +} + +static inline void stl_be_p(void *ptr, int v) +{ + stl_p(ptr, be_bswap(v, 32)); +} + +static inline void stq_be_p(void *ptr, uint64_t v) +{ + stq_p(ptr, be_bswap(v, 64)); +} + +/* float access */ + +static inline float32 ldfl_be_p(const void *ptr) +{ + CPU_FloatU u; + u.l = ldl_be_p(ptr); + return u.f; +} + +static inline void stfl_be_p(void *ptr, float32 v) +{ + CPU_FloatU u; + u.f = v; + stl_be_p(ptr, u.l); +} + +static inline float64 ldfq_be_p(const void *ptr) +{ + CPU_DoubleU u; + u.ll = ldq_be_p(ptr); + return u.d; +} + +static inline void stfq_be_p(void *ptr, float64 v) +{ + CPU_DoubleU u; + u.d = v; + stq_be_p(ptr, u.ll); +} + +/* Legacy unaligned versions. Note that we never had a complete set. 
*/ + +static inline void cpu_to_le16wu(uint16_t *p, uint16_t v) +{ + stw_le_p(p, v); +} + +static inline void cpu_to_le32wu(uint32_t *p, uint32_t v) +{ + stl_le_p(p, v); +} + +static inline uint16_t le16_to_cpupu(const uint16_t *p) +{ + return lduw_le_p(p); +} + +static inline uint32_t le32_to_cpupu(const uint32_t *p) +{ + return ldl_le_p(p); +} + +static inline uint32_t be32_to_cpupu(const uint32_t *p) +{ + return ldl_be_p(p); +} + +static inline void cpu_to_be16wu(uint16_t *p, uint16_t v) +{ + stw_be_p(p, v); +} + +static inline void cpu_to_be32wu(uint32_t *p, uint32_t v) +{ + stl_be_p(p, v); +} + +static inline void cpu_to_be64wu(uint64_t *p, uint64_t v) +{ + stq_be_p(p, v); +} + +static inline void cpu_to_32wu(uint32_t *p, uint32_t v) +{ + stl_p(p, v); +} + +static inline unsigned long leul_to_cpu(unsigned long v) +{ + /* In order to break an include loop between here and + qemu-common.h, don't rely on HOST_LONG_BITS. */ +#if ULONG_MAX == UINT32_MAX + return le_bswap(v, 32); +#elif ULONG_MAX == UINT64_MAX + return le_bswap(v, 64); +#else +# error Unknown sizeof long +#endif +} + +#undef le_bswap +#undef be_bswap +#undef le_bswaps +#undef be_bswaps + +#endif /* BSWAP_H */ diff --git a/contrib/qemu/include/qemu/compiler.h b/contrib/qemu/include/qemu/compiler.h new file mode 100644 index 00000000000..155b358964a --- /dev/null +++ b/contrib/qemu/include/qemu/compiler.h @@ -0,0 +1,55 @@ +/* public domain */ + +#ifndef COMPILER_H +#define COMPILER_H + +#include "config-host.h" + +/*---------------------------------------------------------------------------- +| The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler. +| The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h. +*----------------------------------------------------------------------------*/ +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define QEMU_GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else +# define QEMU_GNUC_PREREQ(maj, min) 0 +#endif + +#define QEMU_NORETURN __attribute__ ((__noreturn__)) + +#if QEMU_GNUC_PREREQ(3, 4) +#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#else +#define QEMU_WARN_UNUSED_RESULT +#endif + +#if defined(_WIN32) +# define QEMU_PACKED __attribute__((gcc_struct, packed)) +#else +# define QEMU_PACKED __attribute__((packed)) +#endif + +#define cat(x,y) x ## y +#define cat2(x,y) cat(x,y) +#define QEMU_BUILD_BUG_ON(x) \ + typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] __attribute__((unused)); + +#if defined __GNUC__ +# if !QEMU_GNUC_PREREQ(4, 4) + /* gcc versions before 4.4.x don't support gnu_printf, so use printf. */ +# define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m))) +# else + /* Use gnu_printf when supported (qemu uses standard format strings). */ +# define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m))) +# if defined(_WIN32) + /* Map __printf__ to __gnu_printf__ because we want standard format strings + * even when MinGW or GLib include files use __printf__. */ +# define __printf__ __gnu_printf__ +# endif +# endif +#else +#define GCC_FMT_ATTR(n, m) +#endif + +#endif /* COMPILER_H */ diff --git a/contrib/qemu/include/qemu/error-report.h b/contrib/qemu/include/qemu/error-report.h new file mode 100644 index 00000000000..3b098a91730 --- /dev/null +++ b/contrib/qemu/include/qemu/error-report.h @@ -0,0 +1,46 @@ +/* + * Error reporting + * + * Copyright (C) 2010 Red Hat Inc. 
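A small sketch of how the compiler.h macros above are typically applied (the struct, its fields and the log helper are invented; it assumes the <stdint.h> types provided by the surrounding headers):

typedef struct QEMU_PACKED DemoHdr {   /* no padding between the fields */
    uint8_t  type;
    uint32_t len;
} DemoHdr;

QEMU_BUILD_BUG_ON(sizeof(DemoHdr) != 5);   /* breaks the build if the condition is true */

/* Format-string checking for a printf-like helper. */
void demo_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);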
+ * + * Authors: + * Markus Armbruster <armbru@redhat.com>, + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_ERROR_H +#define QEMU_ERROR_H + +#include <stdarg.h> +#include <stdbool.h> +#include "qemu/compiler.h" + +typedef struct Location { + /* all members are private to qemu-error.c */ + enum { LOC_NONE, LOC_CMDLINE, LOC_FILE } kind; + int num; + const void *ptr; + struct Location *prev; +} Location; + +Location *loc_push_restore(Location *loc); +Location *loc_push_none(Location *loc); +Location *loc_pop(Location *loc); +Location *loc_save(Location *loc); +void loc_restore(Location *loc); +void loc_set_none(void); +void loc_set_cmdline(char **argv, int idx, int cnt); +void loc_set_file(const char *fname, int lno); + +void error_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0); +void error_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2); +void error_printf_unless_qmp(const char *fmt, ...) GCC_FMT_ATTR(1, 2); +void error_print_loc(void); +void error_set_progname(const char *argv0); +void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2); +const char *error_get_progname(void); +extern bool enable_timestamp_msg; + +#endif diff --git a/contrib/qemu/include/qemu/event_notifier.h b/contrib/qemu/include/qemu/event_notifier.h new file mode 100644 index 00000000000..88b57af7ce8 --- /dev/null +++ b/contrib/qemu/include/qemu/event_notifier.h @@ -0,0 +1,46 @@ +/* + * event notifier support + * + * Copyright Red Hat, Inc. 2010 + * + * Authors: + * Michael S. Tsirkin <mst@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_EVENT_NOTIFIER_H +#define QEMU_EVENT_NOTIFIER_H + +#include "qemu-common.h" + +#ifdef _WIN32 +#include <windows.h> +#endif + +struct EventNotifier { +#ifdef _WIN32 + HANDLE event; +#else + int rfd; + int wfd; +#endif +}; + +typedef void EventNotifierHandler(EventNotifier *); + +int event_notifier_init(EventNotifier *, int active); +void event_notifier_cleanup(EventNotifier *); +int event_notifier_set(EventNotifier *); +int event_notifier_test_and_clear(EventNotifier *); +int event_notifier_set_handler(EventNotifier *, EventNotifierHandler *); + +#ifdef CONFIG_POSIX +void event_notifier_init_fd(EventNotifier *, int fd); +int event_notifier_get_fd(EventNotifier *); +#else +HANDLE event_notifier_get_handle(EventNotifier *); +#endif + +#endif diff --git a/contrib/qemu/include/qemu/hbitmap.h b/contrib/qemu/include/qemu/hbitmap.h new file mode 100644 index 00000000000..550d7ce2c39 --- /dev/null +++ b/contrib/qemu/include/qemu/hbitmap.h @@ -0,0 +1,209 @@ +/* + * Hierarchical Bitmap Data Type + * + * Copyright Red Hat, Inc., 2012 + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#ifndef HBITMAP_H +#define HBITMAP_H 1 + +#include <limits.h> +#include <stdint.h> +#include <stdbool.h> +#include "bitops.h" +#include "host-utils.h" + +typedef struct HBitmap HBitmap; +typedef struct HBitmapIter HBitmapIter; + +#define BITS_PER_LEVEL (BITS_PER_LONG == 32 ? 5 : 6) + +/* For 32-bit, the largest that fits in a 4 GiB address space. + * For 64-bit, the number of sectors in 1 PiB. Good luck, in + * either case... :) + */ +#define HBITMAP_LOG_MAX_SIZE (BITS_PER_LONG == 32 ? 
34 : 41) + +/* We need to place a sentinel in level 0 to speed up iteration. Thus, + * we do this instead of HBITMAP_LOG_MAX_SIZE / BITS_PER_LEVEL. The + * difference is that it allocates an extra level when HBITMAP_LOG_MAX_SIZE + * is an exact multiple of BITS_PER_LEVEL. + */ +#define HBITMAP_LEVELS ((HBITMAP_LOG_MAX_SIZE / BITS_PER_LEVEL) + 1) + +struct HBitmapIter { + const HBitmap *hb; + + /* Copied from hb for access in the inline functions (hb is opaque). */ + int granularity; + + /* Entry offset into the last-level array of longs. */ + size_t pos; + + /* The currently-active path in the tree. Each item of cur[i] stores + * the bits (i.e. the subtrees) yet to be processed under that node. + */ + unsigned long cur[HBITMAP_LEVELS]; +}; + +/** + * hbitmap_alloc: + * @size: Number of bits in the bitmap. + * @granularity: Granularity of the bitmap. Aligned groups of 2^@granularity + * bits will be represented by a single bit. Each operation on a + * range of bits first rounds the bits to determine which group they land + * in, and then affect the entire set; iteration will only visit the first + * bit of each group. + * + * Allocate a new HBitmap. + */ +HBitmap *hbitmap_alloc(uint64_t size, int granularity); + +/** + * hbitmap_empty: + * @hb: HBitmap to operate on. + * + * Return whether the bitmap is empty. + */ +bool hbitmap_empty(const HBitmap *hb); + +/** + * hbitmap_granularity: + * @hb: HBitmap to operate on. + * + * Return the granularity of the HBitmap. + */ +int hbitmap_granularity(const HBitmap *hb); + +/** + * hbitmap_count: + * @hb: HBitmap to operate on. + * + * Return the number of bits set in the HBitmap. + */ +uint64_t hbitmap_count(const HBitmap *hb); + +/** + * hbitmap_set: + * @hb: HBitmap to operate on. + * @start: First bit to set (0-based). + * @count: Number of bits to set. + * + * Set a consecutive range of bits in an HBitmap. + */ +void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count); + +/** + * hbitmap_reset: + * @hb: HBitmap to operate on. + * @start: First bit to reset (0-based). + * @count: Number of bits to reset. + * + * Reset a consecutive range of bits in an HBitmap. + */ +void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count); + +/** + * hbitmap_get: + * @hb: HBitmap to operate on. + * @item: Bit to query (0-based). + * + * Return whether the @item-th bit in an HBitmap is set. + */ +bool hbitmap_get(const HBitmap *hb, uint64_t item); + +/** + * hbitmap_free: + * @hb: HBitmap to operate on. + * + * Free an HBitmap and all of its associated memory. + */ +void hbitmap_free(HBitmap *hb); + +/** + * hbitmap_iter_init: + * @hbi: HBitmapIter to initialize. + * @hb: HBitmap to iterate on. + * @first: First bit to visit (0-based, must be strictly less than the + * size of the bitmap). + * + * Set up @hbi to iterate on the HBitmap @hb. hbitmap_iter_next will return + * the lowest-numbered bit that is set in @hb, starting at @first. + * + * Concurrent setting of bits is acceptable, and will at worst cause the + * iteration to miss some of those bits. Resetting bits before the current + * position of the iterator is also okay. However, concurrent resetting of + * bits can lead to unexpected behavior if the iterator has not yet reached + * those bits. + */ +void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first); + +/* hbitmap_iter_skip_words: + * @hbi: HBitmapIter to operate on. + * + * Internal function used by hbitmap_iter_next and hbitmap_iter_next_word. 
+ */ +unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi); + +/** + * hbitmap_iter_next: + * @hbi: HBitmapIter to operate on. + * + * Return the next bit that is set in @hbi's associated HBitmap, + * or -1 if all remaining bits are zero. + */ +static inline int64_t hbitmap_iter_next(HBitmapIter *hbi) +{ + unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1]; + int64_t item; + + if (cur == 0) { + cur = hbitmap_iter_skip_words(hbi); + if (cur == 0) { + return -1; + } + } + + /* The next call will resume work from the next bit. */ + hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1); + item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur); + + return item << hbi->granularity; +} + +/** + * hbitmap_iter_next_word: + * @hbi: HBitmapIter to operate on. + * @p_cur: Location where to store the next non-zero word. + * + * Return the index of the next nonzero word that is set in @hbi's + * associated HBitmap, and set *p_cur to the content of that word + * (bits before the index that was passed to hbitmap_iter_init are + * trimmed on the first call). Return -1, and set *p_cur to zero, + * if all remaining words are zero. + */ +static inline size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur) +{ + unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1]; + + if (cur == 0) { + cur = hbitmap_iter_skip_words(hbi); + if (cur == 0) { + *p_cur = 0; + return -1; + } + } + + /* The next call will resume work from the next word. */ + hbi->cur[HBITMAP_LEVELS - 1] = 0; + *p_cur = cur; + return hbi->pos; +} + + +#endif diff --git a/contrib/qemu/include/qemu/host-utils.h b/contrib/qemu/include/qemu/host-utils.h new file mode 100644 index 00000000000..0f688c1c00a --- /dev/null +++ b/contrib/qemu/include/qemu/host-utils.h @@ -0,0 +1,322 @@ +/* + * Utility compute operations used by translated code. + * + * Copyright (c) 2007 Thiemo Seufer + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
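Putting the HBitmap API above together, a sketch of a dirty-sector map (hypothetical function; with granularity 3 each bit covers a group of 2^3 = 8 sectors):

static void hbitmap_sketch(void)
{
    HBitmap *dirty = hbitmap_alloc(1 << 20, 3);   /* 1M sectors, 8 per bit */
    HBitmapIter iter;
    int64_t sector;

    hbitmap_set(dirty, 100, 16);        /* sectors 100..115 become dirty */

    hbitmap_iter_init(&iter, dirty, 0);
    while ((sector = hbitmap_iter_next(&iter)) >= 0) {
        /* visits the first sector of each dirty group: 96, 104, 112 */
    }

    hbitmap_free(dirty);
}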
+ */ +#ifndef HOST_UTILS_H +#define HOST_UTILS_H 1 + +#include "qemu/compiler.h" /* QEMU_GNUC_PREREQ */ +#include <limits.h> + +#ifdef CONFIG_INT128 +static inline void mulu64(uint64_t *plow, uint64_t *phigh, + uint64_t a, uint64_t b) +{ + __uint128_t r = (__uint128_t)a * b; + *plow = r; + *phigh = r >> 64; +} + +static inline void muls64(uint64_t *plow, uint64_t *phigh, + int64_t a, int64_t b) +{ + __int128_t r = (__int128_t)a * b; + *plow = r; + *phigh = r >> 64; +} +#else +void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b); +void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b); +#endif + +/** + * clz32 - count leading zeros in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int clz32(uint32_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? __builtin_clz(val) : 32; +#else + /* Binary search for the leading one bit. */ + int cnt = 0; + + if (!(val & 0xFFFF0000U)) { + cnt += 16; + val <<= 16; + } + if (!(val & 0xFF000000U)) { + cnt += 8; + val <<= 8; + } + if (!(val & 0xF0000000U)) { + cnt += 4; + val <<= 4; + } + if (!(val & 0xC0000000U)) { + cnt += 2; + val <<= 2; + } + if (!(val & 0x80000000U)) { + cnt++; + val <<= 1; + } + if (!(val & 0x80000000U)) { + cnt++; + } + return cnt; +#endif +} + +/** + * clo32 - count leading ones in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is -1. + */ +static inline int clo32(uint32_t val) +{ + return clz32(~val); +} + +/** + * clz64 - count leading zeros in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int clz64(uint64_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? __builtin_clzll(val) : 64; +#else + int cnt = 0; + + if (!(val >> 32)) { + cnt += 32; + } else { + val >>= 32; + } + + return cnt + clz32(val); +#endif +} + +/** + * clo64 - count leading ones in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is -1. + */ +static inline int clo64(uint64_t val) +{ + return clz64(~val); +} + +/** + * ctz32 - count trailing zeros in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int ctz32(uint32_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? __builtin_ctz(val) : 32; +#else + /* Binary search for the trailing one bit. */ + int cnt; + + cnt = 0; + if (!(val & 0x0000FFFFUL)) { + cnt += 16; + val >>= 16; + } + if (!(val & 0x000000FFUL)) { + cnt += 8; + val >>= 8; + } + if (!(val & 0x0000000FUL)) { + cnt += 4; + val >>= 4; + } + if (!(val & 0x00000003UL)) { + cnt += 2; + val >>= 2; + } + if (!(val & 0x00000001UL)) { + cnt++; + val >>= 1; + } + if (!(val & 0x00000001UL)) { + cnt++; + } + + return cnt; +#endif +} + +/** + * cto32 - count trailing ones in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is -1. + */ +static inline int cto32(uint32_t val) +{ + return ctz32(~val); +} + +/** + * ctz64 - count trailing zeros in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int ctz64(uint64_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? 
__builtin_ctzll(val) : 64; +#else + int cnt; + + cnt = 0; + if (!((uint32_t)val)) { + cnt += 32; + val >>= 32; + } + + return cnt + ctz32(val); +#endif +} + +/** + * ctz64 - count trailing ones in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is -1. + */ +static inline int cto64(uint64_t val) +{ + return ctz64(~val); +} + +/** + * ctpop8 - count the population of one bits in an 8-bit value. + * @val: The value to search + */ +static inline int ctpop8(uint8_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcount(val); +#else + val = (val & 0x55) + ((val >> 1) & 0x55); + val = (val & 0x33) + ((val >> 2) & 0x33); + val = (val & 0x0f) + ((val >> 4) & 0x0f); + + return val; +#endif +} + +/** + * ctpop16 - count the population of one bits in a 16-bit value. + * @val: The value to search + */ +static inline int ctpop16(uint16_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcount(val); +#else + val = (val & 0x5555) + ((val >> 1) & 0x5555); + val = (val & 0x3333) + ((val >> 2) & 0x3333); + val = (val & 0x0f0f) + ((val >> 4) & 0x0f0f); + val = (val & 0x00ff) + ((val >> 8) & 0x00ff); + + return val; +#endif +} + +/** + * ctpop32 - count the population of one bits in a 32-bit value. + * @val: The value to search + */ +static inline int ctpop32(uint32_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcount(val); +#else + val = (val & 0x55555555) + ((val >> 1) & 0x55555555); + val = (val & 0x33333333) + ((val >> 2) & 0x33333333); + val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); + val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff); + val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff); + + return val; +#endif +} + +/** + * ctpop64 - count the population of one bits in a 64-bit value. + * @val: The value to search + */ +static inline int ctpop64(uint64_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcountll(val); +#else + val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); + val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL); + val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & 0x00ff00ff00ff00ffULL); + val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & 0x0000ffff0000ffffULL); + val = (val & 0x00000000ffffffffULL) + ((val >> 32) & 0x00000000ffffffffULL); + + return val; +#endif +} + +/* Host type specific sizes of these routines. */ + +#if ULONG_MAX == UINT32_MAX +# define clzl clz32 +# define ctzl ctz32 +# define clol clo32 +# define ctol cto32 +# define ctpopl ctpop32 +#elif ULONG_MAX == UINT64_MAX +# define clzl clz64 +# define ctzl ctz64 +# define clol clo64 +# define ctol cto64 +# define ctpopl ctpop64 +#else +# error Unknown sizeof long +#endif + +#endif diff --git a/contrib/qemu/include/qemu/iov.h b/contrib/qemu/include/qemu/iov.h new file mode 100644 index 00000000000..68d25f29b76 --- /dev/null +++ b/contrib/qemu/include/qemu/iov.h @@ -0,0 +1,115 @@ +/* + * Helpers for using (partial) iovecs. + * + * Copyright (C) 2010 Red Hat, Inc. + * + * Author(s): + * Amit Shah <amit.shah@redhat.com> + * Michael Tokarev <mjt@tls.msk.ru> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef IOV_H +#define IOV_H + +#include "qemu-common.h" + +/** + * count and return data size, in bytes, of an iovec + * starting at `iov' of `iov_cnt' number of elements. 
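For reference, what the host-utils.h bit-counting helpers defined above return for a few representative inputs (a hand-written sketch, not part of the patch):

static void host_utils_sketch(void)
{
    int a = clz32(0x00010000);     /* 15: leading zeros above bit 16    */
    int b = ctz32(0x00010000);     /* 16: trailing zeros below bit 16   */
    int c = clz32(0);              /* 32: defined even for a zero input */
    int d = ctpop32(0xF0F0F0F0);   /* 16: number of set bits            */
    (void)a; (void)b; (void)c; (void)d;
}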
+ */ +size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt); + +/** + * Copy from single continuous buffer to scatter-gather vector of buffers + * (iovec) and back like memcpy() between two continuous memory regions. + * Data in single continuous buffer starting at address `buf' and + * `bytes' bytes long will be copied to/from an iovec `iov' with + * `iov_cnt' number of elements, starting at byte position `offset' + * within the iovec. If the iovec does not contain enough space, + * only part of data will be copied, up to the end of the iovec. + * Number of bytes actually copied will be returned, which is + * min(bytes, iov_size(iov)-offset) + * `Offset' must point to the inside of iovec. + * It is okay to use very large value for `bytes' since we're + * limited by the size of the iovec anyway, provided that the + * buffer pointed to by buf has enough space. One possible + * such "large" value is -1 (sinice size_t is unsigned), + * so specifying `-1' as `bytes' means 'up to the end of iovec'. + */ +size_t iov_from_buf(const struct iovec *iov, unsigned int iov_cnt, + size_t offset, const void *buf, size_t bytes); +size_t iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt, + size_t offset, void *buf, size_t bytes); + +/** + * Set data bytes pointed out by iovec `iov' of size `iov_cnt' elements, + * starting at byte offset `start', to value `fillc', repeating it + * `bytes' number of times. `Offset' must point to the inside of iovec. + * If `bytes' is large enough, only last bytes portion of iovec, + * up to the end of it, will be filled with the specified value. + * Function return actual number of bytes processed, which is + * min(size, iov_size(iov) - offset). + * Again, it is okay to use large value for `bytes' to mean "up to the end". + */ +size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt, + size_t offset, int fillc, size_t bytes); + +/* + * Send/recv data from/to iovec buffers directly + * + * `offset' bytes in the beginning of iovec buffer are skipped and + * next `bytes' bytes are used, which must be within data of iovec. + * + * r = iov_send_recv(sockfd, iov, iovcnt, offset, bytes, true); + * + * is logically equivalent to + * + * char *buf = malloc(bytes); + * iov_to_buf(iov, iovcnt, offset, buf, bytes); + * r = send(sockfd, buf, bytes, 0); + * free(buf); + * + * For iov_send_recv() _whole_ area being sent or received + * should be within the iovec, not only beginning of it. + */ +ssize_t iov_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, + size_t offset, size_t bytes, bool do_send); +#define iov_recv(sockfd, iov, iov_cnt, offset, bytes) \ + iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, false) +#define iov_send(sockfd, iov, iov_cnt, offset, bytes) \ + iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, true) + +/** + * Produce a text hexdump of iovec `iov' with `iov_cnt' number of elements + * in file `fp', prefixing each line with `prefix' and processing not more + * than `limit' data bytes. + */ +void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt, + FILE *fp, const char *prefix, size_t limit); + +/* + * Partial copy of vector from iov to dst_iov (data is not copied). + * dst_iov overlaps iov at a specified offset. + * size of dst_iov is at most bytes. dst vector count is returned. 
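A sketch of the iov helpers above, including the "pass -1 as bytes to mean up to the end of the iovec" convention described in the comments (hypothetical function; struct iovec comes from the system headers pulled in by qemu-common.h):

static void iov_usage_sketch(void)
{
    char a[4] = "abc", b[4] = "def";
    struct iovec iov[2] = {
        { .iov_base = a, .iov_len = 3 },
        { .iov_base = b, .iov_len = 3 },
    };
    char flat[8];

    size_t total = iov_size(iov, 2);                 /* 6 */
    size_t done  = iov_to_buf(iov, 2, 0, flat, -1);  /* copies all 6 bytes */
    /* done == total == 6; flat now holds "abcdef" (not NUL-terminated) */
    (void)total; (void)done;
}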
+ */ +unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt, + const struct iovec *iov, unsigned int iov_cnt, + size_t offset, size_t bytes); + +/* + * Remove a given number of bytes from the front or back of a vector. + * This may update iov and/or iov_cnt to exclude iovec elements that are + * no longer required. + * + * The number of bytes actually discarded is returned. This number may be + * smaller than requested if the vector is too small. + */ +size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt, + size_t bytes); +size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt, + size_t bytes); + +#endif diff --git a/contrib/qemu/include/qemu/main-loop.h b/contrib/qemu/include/qemu/main-loop.h new file mode 100644 index 00000000000..6f0200a7acc --- /dev/null +++ b/contrib/qemu/include/qemu/main-loop.h @@ -0,0 +1,311 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_MAIN_LOOP_H +#define QEMU_MAIN_LOOP_H 1 + +#include "block/aio.h" + +#define SIG_IPI SIGUSR1 + +/** + * qemu_init_main_loop: Set up the process so that it can run the main loop. + * + * This includes setting up signal handlers. It should be called before + * any other threads are created. In addition, threads other than the + * main one should block signals that are trapped by the main loop. + * For simplicity, you can consider these signals to be safe: SIGUSR1, + * SIGUSR2, thread signals (SIGFPE, SIGILL, SIGSEGV, SIGBUS) and real-time + * signals if available. Remember that Windows in practice does not have + * signals, though. + * + * In the case of QEMU tools, this will also start/initialize timers. + */ +int qemu_init_main_loop(void); + +/** + * main_loop_wait: Run one iteration of the main loop. + * + * If @nonblocking is true, poll for events, otherwise suspend until + * one actually occurs. The main loop usually consists of a loop that + * repeatedly calls main_loop_wait(false). + * + * Main loop services include file descriptor callbacks, bottom halves + * and timers (defined in qemu-timer.h). Bottom halves are similar to timers + * that execute immediately, but have a lower overhead and scheduling them + * is wait-free, thread-safe and signal-safe. + * + * It is sometimes useful to put a whole program in a coroutine. 
In this + * case, the coroutine actually should be started from within the main loop, + * so that the main loop can run whenever the coroutine yields. To do this, + * you can use a bottom half to enter the coroutine as soon as the main loop + * starts: + * + * void enter_co_bh(void *opaque) { + * QEMUCoroutine *co = opaque; + * qemu_coroutine_enter(co, NULL); + * } + * + * ... + * QEMUCoroutine *co = qemu_coroutine_create(coroutine_entry); + * QEMUBH *start_bh = qemu_bh_new(enter_co_bh, co); + * qemu_bh_schedule(start_bh); + * while (...) { + * main_loop_wait(false); + * } + * + * (In the future we may provide a wrapper for this). + * + * @nonblocking: Whether the caller should block until an event occurs. + */ +int main_loop_wait(int nonblocking); + +/** + * qemu_get_aio_context: Return the main loop's AioContext + */ +AioContext *qemu_get_aio_context(void); + +/** + * qemu_notify_event: Force processing of pending events. + * + * Similar to signaling a condition variable, qemu_notify_event forces + * main_loop_wait to look at pending events and exit. The caller of + * main_loop_wait will usually call it again very soon, so qemu_notify_event + * also has the side effect of recalculating the sets of file descriptors + * that the main loop waits for. + * + * Calling qemu_notify_event is rarely necessary, because main loop + * services (bottom halves and timers) call it themselves. One notable + * exception occurs when using qemu_set_fd_handler2 (see below). + */ +void qemu_notify_event(void); + +#ifdef _WIN32 +/* return TRUE if no sleep should be done afterwards */ +typedef int PollingFunc(void *opaque); + +/** + * qemu_add_polling_cb: Register a Windows-specific polling callback + * + * Currently, under Windows some events are polled rather than waited for. + * Polling callbacks do not ensure that @func is called timely, because + * the main loop might wait for an arbitrarily long time. If possible, + * you should instead create a separate thread that does a blocking poll + * and set a Win32 event object. The event can then be passed to + * qemu_add_wait_object. + * + * Polling callbacks really have nothing Windows specific in them, but + * as they are a hack and are currently not necessary under POSIX systems, + * they are only available when QEMU is running under Windows. + * + * @func: The function that does the polling, and returns 1 to force + * immediate completion of main_loop_wait. + * @opaque: A pointer-size value that is passed to @func. + */ +int qemu_add_polling_cb(PollingFunc *func, void *opaque); + +/** + * qemu_del_polling_cb: Unregister a Windows-specific polling callback + * + * This function removes a callback that was registered with + * qemu_add_polling_cb. + * + * @func: The function that was passed to qemu_add_polling_cb. + * @opaque: A pointer-size value that was passed to qemu_add_polling_cb. + */ +void qemu_del_polling_cb(PollingFunc *func, void *opaque); + +/* Wait objects handling */ +typedef void WaitObjectFunc(void *opaque); + +/** + * qemu_add_wait_object: Register a callback for a Windows handle + * + * Under Windows, the iohandler mechanism can only be used with sockets. + * QEMU must use the WaitForMultipleObjects API to wait on other handles. + * This function registers a #HANDLE with QEMU, so that it will be included + * in the main loop's calls to WaitForMultipleObjects. When the handle + * is in a signaled state, QEMU will call @func. + * + * @handle: The Windows handle to be observed. 
+ * @func: A function to be called when @handle is in a signaled state. + * @opaque: A pointer-size value that is passed to @func. + */ +int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque); + +/** + * qemu_del_wait_object: Unregister a callback for a Windows handle + * + * This function removes a callback that was registered with + * qemu_add_wait_object. + * + * @func: The function that was passed to qemu_add_wait_object. + * @opaque: A pointer-size value that was passed to qemu_add_wait_object. + */ +void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque); +#endif + +/* async I/O support */ + +typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size); +typedef int IOCanReadHandler(void *opaque); + +/** + * qemu_set_fd_handler2: Register a file descriptor with the main loop + * + * This function tells the main loop to wake up whenever one of the + * following conditions is true: + * + * 1) if @fd_write is not %NULL, when the file descriptor is writable; + * + * 2) if @fd_read is not %NULL, when the file descriptor is readable. + * + * @fd_read_poll can be used to disable the @fd_read callback temporarily. + * This is useful to avoid calling qemu_set_fd_handler2 every time the + * client becomes interested in reading (or dually, stops being interested). + * A typical example is when @fd is a listening socket and you want to bound + * the number of active clients. Remember to call qemu_notify_event whenever + * the condition may change from %false to %true. + * + * The callbacks that are set up by qemu_set_fd_handler2 are level-triggered. + * If @fd_read does not read from @fd, or @fd_write does not write to @fd + * until its buffers are full, they will be called again on the next + * iteration. + * + * @fd: The file descriptor to be observed. Under Windows it must be + * a #SOCKET. + * + * @fd_read_poll: A function that returns 1 if the @fd_read callback + * should be fired. If the function returns 0, the main loop will not + * end its iteration even if @fd becomes readable. + * + * @fd_read: A level-triggered callback that is fired if @fd is readable + * at the beginning of a main loop iteration, or if it becomes readable + * during one. + * + * @fd_write: A level-triggered callback that is fired when @fd is writable + * at the beginning of a main loop iteration, or if it becomes writable + * during one. + * + * @opaque: A pointer-sized value that is passed to @fd_read_poll, + * @fd_read and @fd_write. + */ +int qemu_set_fd_handler2(int fd, + IOCanReadHandler *fd_read_poll, + IOHandler *fd_read, + IOHandler *fd_write, + void *opaque); + +/** + * qemu_set_fd_handler: Register a file descriptor with the main loop + * + * This function tells the main loop to wake up whenever one of the + * following conditions is true: + * + * 1) if @fd_write is not %NULL, when the file descriptor is writable; + * + * 2) if @fd_read is not %NULL, when the file descriptor is readable. + * + * The callbacks that are set up by qemu_set_fd_handler are level-triggered. + * If @fd_read does not read from @fd, or @fd_write does not write to @fd + * until its buffers are full, they will be called again on the next + * iteration. + * + * @fd: The file descriptor to be observed. Under Windows it must be + * a #SOCKET. + * + * @fd_read: A level-triggered callback that is fired if @fd is readable + * at the beginning of a main loop iteration, or if it becomes readable + * during one. 
+ * + * @fd_write: A level-triggered callback that is fired when @fd is writable + * at the beginning of a main loop iteration, or if it becomes writable + * during one. + * + * @opaque: A pointer-sized value that is passed to @fd_read and @fd_write. + */ +int qemu_set_fd_handler(int fd, + IOHandler *fd_read, + IOHandler *fd_write, + void *opaque); + +#ifdef CONFIG_POSIX +/** + * qemu_add_child_watch: Register a child process for reaping. + * + * Under POSIX systems, a parent process must read the exit status of + * its child processes using waitpid, or the operating system will not + * free some of the resources attached to that process. + * + * This function directs the QEMU main loop to observe a child process + * and call waitpid as soon as it exits; the watch is then removed + * automatically. It is useful whenever QEMU forks a child process + * but will find out about its termination by other means such as a + * "broken pipe". + * + * @pid: The pid that QEMU should observe. + */ +int qemu_add_child_watch(pid_t pid); +#endif + +/** + * qemu_mutex_lock_iothread: Lock the main loop mutex. + * + * This function locks the main loop mutex. The mutex is taken by + * qemu_init_main_loop and always taken except while waiting on + * external events (such as with select). The mutex should be taken + * by threads other than the main loop thread when calling + * qemu_bh_new(), qemu_set_fd_handler() and basically all other + * functions documented in this file. + * + * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread + * is a no-op there. + */ +void qemu_mutex_lock_iothread(void); + +/** + * qemu_mutex_unlock_iothread: Unlock the main loop mutex. + * + * This function unlocks the main loop mutex. The mutex is taken by + * qemu_init_main_loop and always taken except while waiting on + * external events (such as with select). The mutex should be unlocked + * as soon as possible by threads other than the main loop thread, + * because it prevents the main loop from processing callbacks, + * including timers and bottom halves. + * + * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread + * is a no-op there. + */ +void qemu_mutex_unlock_iothread(void); + +/* internal interfaces */ + +void qemu_fd_register(int fd); +void qemu_iohandler_fill(GArray *pollfds); +void qemu_iohandler_poll(GArray *pollfds, int rc); + +QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque); +void qemu_bh_schedule_idle(QEMUBH *bh); + +#endif diff --git a/contrib/qemu/include/qemu/module.h b/contrib/qemu/include/qemu/module.h new file mode 100644 index 00000000000..c4ccd571664 --- /dev/null +++ b/contrib/qemu/include/qemu/module.h @@ -0,0 +1,40 @@ +/* + * QEMU Module Infrastructure + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_MODULE_H +#define QEMU_MODULE_H + +/* This should not be used directly. Use block_init etc. instead. 
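+ *
+ * For example, a block driver typically registers itself from an init
+ * function wired up with block_init() (bdrv_foo and the bdrv_register()
+ * call are shown for illustration only):
+ *
+ *   static void bdrv_foo_init(void)
+ *   {
+ *       bdrv_register(&bdrv_foo);
+ *   }
+ *   block_init(bdrv_foo_init);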
*/ +#define module_init(function, type) \ +static void __attribute__((constructor)) do_qemu_init_ ## function(void) { \ + register_module_init(function, type); \ +} + +typedef enum { + MODULE_INIT_BLOCK, + MODULE_INIT_MACHINE, + MODULE_INIT_QAPI, + MODULE_INIT_QOM, + MODULE_INIT_MAX +} module_init_type; + +#define block_init(function) module_init(function, MODULE_INIT_BLOCK) +#define machine_init(function) module_init(function, MODULE_INIT_MACHINE) +#define qapi_init(function) module_init(function, MODULE_INIT_QAPI) +#define type_init(function) module_init(function, MODULE_INIT_QOM) + +void register_module_init(void (*fn)(void), module_init_type type); + +void module_call_init(module_init_type type); + +#endif diff --git a/contrib/qemu/include/qemu/notify.h b/contrib/qemu/include/qemu/notify.h new file mode 100644 index 00000000000..a3d73e4bc76 --- /dev/null +++ b/contrib/qemu/include/qemu/notify.h @@ -0,0 +1,72 @@ +/* + * Notifier lists + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_NOTIFY_H +#define QEMU_NOTIFY_H + +#include "qemu/queue.h" + +typedef struct Notifier Notifier; + +struct Notifier +{ + void (*notify)(Notifier *notifier, void *data); + QLIST_ENTRY(Notifier) node; +}; + +typedef struct NotifierList +{ + QLIST_HEAD(, Notifier) notifiers; +} NotifierList; + +#define NOTIFIER_LIST_INITIALIZER(head) \ + { QLIST_HEAD_INITIALIZER((head).notifiers) } + +void notifier_list_init(NotifierList *list); + +void notifier_list_add(NotifierList *list, Notifier *notifier); + +void notifier_remove(Notifier *notifier); + +void notifier_list_notify(NotifierList *list, void *data); + +/* Same as Notifier but allows .notify() to return errors */ +typedef struct NotifierWithReturn NotifierWithReturn; + +struct NotifierWithReturn { + /** + * Return 0 on success (next notifier will be invoked), otherwise + * notifier_with_return_list_notify() will stop and return the value. 
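+ *
+ * A minimal sketch (device_is_busy() and the -EBUSY veto value are
+ * illustrative):
+ *
+ *   static int my_notify(NotifierWithReturn *n, void *data)
+ *   {
+ *       return device_is_busy(data) ? -EBUSY : 0;
+ *   }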
+ */ + int (*notify)(NotifierWithReturn *notifier, void *data); + QLIST_ENTRY(NotifierWithReturn) node; +}; + +typedef struct NotifierWithReturnList { + QLIST_HEAD(, NotifierWithReturn) notifiers; +} NotifierWithReturnList; + +#define NOTIFIER_WITH_RETURN_LIST_INITIALIZER(head) \ + { QLIST_HEAD_INITIALIZER((head).notifiers) } + +void notifier_with_return_list_init(NotifierWithReturnList *list); + +void notifier_with_return_list_add(NotifierWithReturnList *list, + NotifierWithReturn *notifier); + +void notifier_with_return_remove(NotifierWithReturn *notifier); + +int notifier_with_return_list_notify(NotifierWithReturnList *list, + void *data); + +#endif diff --git a/contrib/qemu/include/qemu/option.h b/contrib/qemu/include/qemu/option.h new file mode 100644 index 00000000000..a83c700323e --- /dev/null +++ b/contrib/qemu/include/qemu/option.h @@ -0,0 +1,157 @@ +/* + * Commandline option parsing functions + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2009 Kevin Wolf <kwolf@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_OPTIONS_H +#define QEMU_OPTIONS_H + +#include <stdint.h> +#include "qemu/queue.h" +#include "qapi/error.h" +#include "qapi/qmp/qdict.h" + +enum QEMUOptionParType { + OPT_FLAG, + OPT_NUMBER, + OPT_SIZE, + OPT_STRING, +}; + +typedef struct QEMUOptionParameter { + const char *name; + enum QEMUOptionParType type; + union { + uint64_t n; + char* s; + } value; + const char *help; +} QEMUOptionParameter; + + +const char *get_opt_name(char *buf, int buf_size, const char *p, char delim); +const char *get_opt_value(char *buf, int buf_size, const char *p); +int get_next_param_value(char *buf, int buf_size, + const char *tag, const char **pstr); +int get_param_value(char *buf, int buf_size, + const char *tag, const char *str); + + +/* + * The following functions take a parameter list as input. This is a pointer to + * the first element of a QEMUOptionParameter array which is terminated by an + * entry with entry->name == NULL. 
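+ *
+ * For example (the option names and help strings are illustrative), a
+ * driver might describe its creation options as:
+ *
+ *   static QEMUOptionParameter foo_create_options[] = {
+ *       { .name = "size", .type = OPT_SIZE, .help = "virtual disk size" },
+ *       { .name = "backing_file", .type = OPT_STRING,
+ *         .help = "file name of a base image" },
+ *       { NULL }
+ *   };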
+ */ + +QEMUOptionParameter *get_option_parameter(QEMUOptionParameter *list, + const char *name); +int set_option_parameter(QEMUOptionParameter *list, const char *name, + const char *value); +int set_option_parameter_int(QEMUOptionParameter *list, const char *name, + uint64_t value); +QEMUOptionParameter *append_option_parameters(QEMUOptionParameter *dest, + QEMUOptionParameter *list); +QEMUOptionParameter *parse_option_parameters(const char *param, + QEMUOptionParameter *list, QEMUOptionParameter *dest); +void free_option_parameters(QEMUOptionParameter *list); +void print_option_parameters(QEMUOptionParameter *list); +void print_option_help(QEMUOptionParameter *list); + +/* ------------------------------------------------------------------ */ + +typedef struct QemuOpt QemuOpt; +typedef struct QemuOpts QemuOpts; +typedef struct QemuOptsList QemuOptsList; + +enum QemuOptType { + QEMU_OPT_STRING = 0, /* no parsing (use string as-is) */ + QEMU_OPT_BOOL, /* on/off */ + QEMU_OPT_NUMBER, /* simple number */ + QEMU_OPT_SIZE, /* size, accepts (K)ilo, (M)ega, (G)iga, (T)era postfix */ +}; + +typedef struct QemuOptDesc { + const char *name; + enum QemuOptType type; + const char *help; +} QemuOptDesc; + +struct QemuOptsList { + const char *name; + const char *implied_opt_name; + bool merge_lists; /* Merge multiple uses of option into a single list? */ + QTAILQ_HEAD(, QemuOpts) head; + QemuOptDesc desc[]; +}; + +const char *qemu_opt_get(QemuOpts *opts, const char *name); +/** + * qemu_opt_has_help_opt: + * @opts: options to search for a help request + * + * Check whether the options specified by @opts include one of the + * standard strings which indicate that the user is asking for a + * list of the valid values for a command line option (as defined + * by is_help_option()). + * + * Returns: true if @opts includes 'help' or equivalent. 
+ */ +bool qemu_opt_has_help_opt(QemuOpts *opts); +bool qemu_opt_get_bool(QemuOpts *opts, const char *name, bool defval); +uint64_t qemu_opt_get_number(QemuOpts *opts, const char *name, uint64_t defval); +uint64_t qemu_opt_get_size(QemuOpts *opts, const char *name, uint64_t defval); +int qemu_opt_set(QemuOpts *opts, const char *name, const char *value); +void qemu_opt_set_err(QemuOpts *opts, const char *name, const char *value, + Error **errp); +int qemu_opt_set_bool(QemuOpts *opts, const char *name, bool val); +int qemu_opt_set_number(QemuOpts *opts, const char *name, int64_t val); +typedef int (*qemu_opt_loopfunc)(const char *name, const char *value, void *opaque); +int qemu_opt_foreach(QemuOpts *opts, qemu_opt_loopfunc func, void *opaque, + int abort_on_failure); + +QemuOpts *qemu_opts_find(QemuOptsList *list, const char *id); +QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id, + int fail_if_exists, Error **errp); +QemuOpts *qemu_opts_create_nofail(QemuOptsList *list); +void qemu_opts_reset(QemuOptsList *list); +void qemu_opts_loc_restore(QemuOpts *opts); +int qemu_opts_set(QemuOptsList *list, const char *id, + const char *name, const char *value); +const char *qemu_opts_id(QemuOpts *opts); +void qemu_opts_del(QemuOpts *opts); +void qemu_opts_validate(QemuOpts *opts, const QemuOptDesc *desc, Error **errp); +int qemu_opts_do_parse(QemuOpts *opts, const char *params, const char *firstname); +QemuOpts *qemu_opts_parse(QemuOptsList *list, const char *params, int permit_abbrev); +void qemu_opts_set_defaults(QemuOptsList *list, const char *params, + int permit_abbrev); +QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict, + Error **errp); +QDict *qemu_opts_to_qdict(QemuOpts *opts, QDict *qdict); +void qemu_opts_absorb_qdict(QemuOpts *opts, QDict *qdict, Error **errp); + +typedef int (*qemu_opts_loopfunc)(QemuOpts *opts, void *opaque); +int qemu_opts_print(QemuOpts *opts, void *dummy); +int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, void *opaque, + int abort_on_failure); + +#endif diff --git a/contrib/qemu/include/qemu/option_int.h b/contrib/qemu/include/qemu/option_int.h new file mode 100644 index 00000000000..8212fa4a485 --- /dev/null +++ b/contrib/qemu/include/qemu/option_int.h @@ -0,0 +1,54 @@ +/* + * Commandline option parsing functions + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2009 Kevin Wolf <kwolf@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef QEMU_OPTIONS_INTERNAL_H +#define QEMU_OPTIONS_INTERNAL_H + +#include "qemu/option.h" +#include "qemu/error-report.h" + +struct QemuOpt { + const char *name; + const char *str; + + const QemuOptDesc *desc; + union { + bool boolean; + uint64_t uint; + } value; + + QemuOpts *opts; + QTAILQ_ENTRY(QemuOpt) next; +}; + +struct QemuOpts { + char *id; + QemuOptsList *list; + Location loc; + QTAILQ_HEAD(QemuOptHead, QemuOpt) head; + QTAILQ_ENTRY(QemuOpts) next; +}; + +#endif diff --git a/contrib/qemu/include/qemu/osdep.h b/contrib/qemu/include/qemu/osdep.h new file mode 100644 index 00000000000..26136f16ecd --- /dev/null +++ b/contrib/qemu/include/qemu/osdep.h @@ -0,0 +1,218 @@ +#ifndef QEMU_OSDEP_H +#define QEMU_OSDEP_H + +#include "config-host.h" +#include <stdarg.h> +#include <stddef.h> +#include <stdbool.h> +#include <sys/types.h> +#ifdef __OpenBSD__ +#include <sys/signal.h> +#endif + +#ifndef _WIN32 +#include <sys/wait.h> +#else +#define WIFEXITED(x) 1 +#define WEXITSTATUS(x) (x) +#endif + +#include <sys/time.h> + +#if defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10 +/* [u]int_fast*_t not in <sys/int_types.h> */ +typedef unsigned char uint_fast8_t; +typedef unsigned int uint_fast16_t; +typedef signed int int_fast16_t; +#endif + +#ifndef glue +#define xglue(x, y) x ## y +#define glue(x, y) xglue(x, y) +#define stringify(s) tostring(s) +#define tostring(s) #s +#endif + +#ifndef likely +#if __GNUC__ < 3 +#define __builtin_expect(x, n) (x) +#endif + +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifndef container_of +#define container_of(ptr, type, member) ({ \ + const typeof(((type *) 0)->member) *__mptr = (ptr); \ + (type *) ((char *) __mptr - offsetof(type, member));}) +#endif + +/* Convert from a base type to a parent type, with compile time checking. */ +#ifdef __GNUC__ +#define DO_UPCAST(type, field, dev) ( __extension__ ( { \ + char __attribute__((unused)) offset_must_be_zero[ \ + -offsetof(type, field)]; \ + container_of(dev, type, field);})) +#else +#define DO_UPCAST(type, field, dev) container_of(dev, type, field) +#endif + +#define typeof_field(type, field) typeof(((type *)0)->field) +#define type_check(t1,t2) ((t1*)0 - (t2*)0) + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) +#endif + +#ifndef ROUND_UP +#define ROUND_UP(n,d) (((n) + (d) - 1) & -(d)) +#endif + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef always_inline +#if !((__GNUC__ < 3) || defined(__APPLE__)) +#ifdef __OPTIMIZE__ +#undef inline +#define inline __attribute__ (( always_inline )) __inline__ +#endif +#endif +#else +#undef inline +#define inline always_inline +#endif + +#define qemu_printf printf + +int qemu_daemon(int nochdir, int noclose); +void *qemu_memalign(size_t alignment, size_t size); +void *qemu_anon_ram_alloc(size_t size); +void qemu_vfree(void *ptr); +void qemu_anon_ram_free(void *ptr, size_t size); + +#define QEMU_MADV_INVALID -1 + +#if defined(CONFIG_MADVISE) + +#define QEMU_MADV_WILLNEED MADV_WILLNEED +#define QEMU_MADV_DONTNEED MADV_DONTNEED +#ifdef MADV_DONTFORK +#define QEMU_MADV_DONTFORK MADV_DONTFORK +#else +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#endif +#ifdef MADV_MERGEABLE +#define QEMU_MADV_MERGEABLE MADV_MERGEABLE +#else +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#endif +#ifdef MADV_DONTDUMP +#define QEMU_MADV_DONTDUMP MADV_DONTDUMP +#else +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#endif +#ifdef MADV_HUGEPAGE +#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE +#else +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#endif + +#elif defined(CONFIG_POSIX_MADVISE) + +#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED +#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID + +#else /* no-op */ + +#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID + +#endif + +int qemu_madvise(void *addr, size_t len, int advice); + +int qemu_open(const char *name, int flags, ...); +int qemu_close(int fd); + +#if defined(__HAIKU__) && defined(__i386__) +#define FMT_pid "%ld" +#elif defined(WIN64) +#define FMT_pid "%" PRId64 +#else +#define FMT_pid "%d" +#endif + +int qemu_create_pidfile(const char *filename); +int qemu_get_thread_id(void); + +#ifndef CONFIG_IOVEC +struct iovec { + void *iov_base; + size_t iov_len; +}; +/* + * Use the same value as Linux for now. + */ +#define IOV_MAX 1024 + +ssize_t readv(int fd, const struct iovec *iov, int iov_cnt); +ssize_t writev(int fd, const struct iovec *iov, int iov_cnt); +#else +#include <sys/uio.h> +#endif + +#ifdef _WIN32 +static inline void qemu_timersub(const struct timeval *val1, + const struct timeval *val2, + struct timeval *res) +{ + res->tv_sec = val1->tv_sec - val2->tv_sec; + if (val1->tv_usec < val2->tv_usec) { + res->tv_sec--; + res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000; + } else { + res->tv_usec = val1->tv_usec - val2->tv_usec; + } +} +#else +#define qemu_timersub timersub +#endif + +void qemu_set_cloexec(int fd); + +void qemu_set_version(const char *); +const char *qemu_get_version(void); + +void fips_set_state(bool requested); +bool fips_get_state(void); + +/* Return a dynamically allocated pathname denoting a file or directory that is + * appropriate for storing local state. + * + * @relative_pathname need not start with a directory separator; one will be + * added automatically. 
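+ *
+ * For example (the prefix depends on how QEMU was configured, so the
+ * result shown is illustrative only):
+ *
+ *   char *path = qemu_get_local_state_pathname("run/foo.pid");
+ *   might yield e.g. "/var/run/foo.pid" on a POSIX host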
+ * + * The caller is responsible for releasing the value returned with g_free() + * after use. + */ +char *qemu_get_local_state_pathname(const char *relative_pathname); + +#endif diff --git a/contrib/qemu/include/qemu/queue.h b/contrib/qemu/include/qemu/queue.h new file mode 100644 index 00000000000..d433b9017ce --- /dev/null +++ b/contrib/qemu/include/qemu/queue.h @@ -0,0 +1,414 @@ +/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */ + +/* + * QEMU version: Copy from netbsd, removed debug code, removed some of + * the implementations. Left in singly-linked lists, lists, simple + * queues, and tail queues. + */ + +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef QEMU_SYS_QUEUE_H_ +#define QEMU_SYS_QUEUE_H_ + +/* + * This file defines four types of data structures: singly-linked lists, + * lists, simple queues, and tail queues. + * + * A singly-linked list is headed by a single forward pointer. The + * elements are singly linked for minimum space and pointer manipulation + * overhead at the expense of O(n) removal for arbitrary elements. New + * elements can be added to the list after an existing element or at the + * head of the list. Elements being removed from the head of the list + * should use the explicit macro for this purpose for optimum + * efficiency. A singly-linked list may only be traversed in the forward + * direction. Singly-linked lists are ideal for applications with large + * datasets and few or no removals or for implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. 
+ * + * A simple queue is headed by a pair of pointers, one the head of the + * list and the other to the tail of the list. The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +#include "qemu/atomic.h" /* for smp_wmb() */ + +/* + * List definitions. + */ +#define QLIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define QLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define QLIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List functions. + */ +#define QLIST_INIT(head) do { \ + (head)->lh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \ + (elm)->field.le_prev = &(head)->lh_first; \ + (elm)->field.le_next = (head)->lh_first; \ + smp_wmb(); /* fill elm before linking it */ \ + if ((head)->lh_first != NULL) { \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next; \ + } \ + (head)->lh_first = (elm); \ + smp_wmb(); \ +} while (/* CONSTCOND*/0) + +#define QLIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); \ + (var); \ + (var) = ((var)->field.le_next)) + +#define QLIST_FOREACH_SAFE(var, head, field, next_var) \ + for ((var) = ((head)->lh_first); \ + (var) && ((next_var) = ((var)->field.le_next), 1); \ + (var) = (next_var)) + +/* + * List access methods. + */ +#define QLIST_EMPTY(head) ((head)->lh_first == NULL) +#define QLIST_FIRST(head) ((head)->lh_first) +#define QLIST_NEXT(elm, field) ((elm)->field.le_next) + + +/* + * Singly-linked List definitions. 
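+ *
+ * A minimal usage sketch (struct Node, the g_new0() allocation and the
+ * variable names are illustrative); a singly-linked list works well as
+ * a LIFO stack:
+ *
+ *   typedef struct Node { int value; QSLIST_ENTRY(Node) next; } Node;
+ *   QSLIST_HEAD(, Node) stack = QSLIST_HEAD_INITIALIZER(stack);
+ *
+ *   Node *n = g_new0(Node, 1);
+ *   QSLIST_INSERT_HEAD(&stack, n, next);      (push)
+ *   n = QSLIST_FIRST(&stack);                 (peek)
+ *   QSLIST_REMOVE_HEAD(&stack, next);         (pop)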
+ */ +#define QSLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define QSLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define QSLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. + */ +#define QSLIST_INIT(head) do { \ + (head)->slh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (/*CONSTCOND*/0) + +#define QSLIST_REMOVE_AFTER(slistelm, field) do { \ + (slistelm)->field.sle_next = \ + QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_FOREACH(var, head, field) \ + for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) + +#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = QSLIST_FIRST((head)); \ + (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +/* + * Singly-linked List access methods. + */ +#define QSLIST_EMPTY(head) ((head)->slh_first == NULL) +#define QSLIST_FIRST(head) ((head)->slh_first) +#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next) + + +/* + * Simple queue definitions. + */ +#define QSIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define QSIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define QSIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue functions. 
+ */ +#define QSIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \ + if ((head)->sqh_first == (elm)) { \ + QSIMPLEQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->sqh_first; \ + while (curelm->field.sqe_next != (elm)) \ + curelm = curelm->field.sqe_next; \ + if ((curelm->field.sqe_next = \ + curelm->field.sqe_next->field.sqe_next) == NULL) \ + (head)->sqh_last = &(curelm)->field.sqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->sqh_first); \ + (var); \ + (var) = ((var)->field.sqe_next)) + +#define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \ + for ((var) = ((head)->sqh_first); \ + (var) && ((next = ((var)->field.sqe_next)), 1); \ + (var) = (next)) + +#define QSIMPLEQ_CONCAT(head1, head2) do { \ + if (!QSIMPLEQ_EMPTY((head2))) { \ + *(head1)->sqh_last = (head2)->sqh_first; \ + (head1)->sqh_last = (head2)->sqh_last; \ + QSIMPLEQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_LAST(head, type, field) \ + (QSIMPLEQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *)(void *) \ + ((char *)((head)->sqh_last) - offsetof(struct type, field)))) + +/* + * Simple queue access methods. + */ +#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) +#define QSIMPLEQ_FIRST(head) ((head)->sqh_first) +#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + + +/* + * Tail queue definitions. + */ +#define Q_TAILQ_HEAD(name, type, qual) \ +struct name { \ + qual type *tqh_first; /* first element */ \ + qual type *qual *tqh_last; /* addr of last next element */ \ +} +#define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,) + +#define QTAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define Q_TAILQ_ENTRY(type, qual) \ +struct { \ + qual type *tqe_next; /* next element */ \ + qual type *qual *tqe_prev; /* address of previous next element */\ +} +#define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,) + +/* + * Tail queue functions. 
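+ *
+ * A typical usage sketch (struct Device and the field names are
+ * illustrative):
+ *
+ *   typedef struct Device { int id; QTAILQ_ENTRY(Device) link; } Device;
+ *   QTAILQ_HEAD(, Device) devices = QTAILQ_HEAD_INITIALIZER(devices);
+ *
+ *   QTAILQ_INSERT_TAIL(&devices, dev, link);
+ *   QTAILQ_FOREACH(dev, &devices, link) {
+ *       visit dev in insertion order
+ *   }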
+ */ +#define QTAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); \ + (var); \ + (var) = ((var)->field.tqe_next)) + +#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \ + for ((var) = ((head)->tqh_first); \ + (var) && ((next_var) = ((var)->field.tqe_next), 1); \ + (var) = (next_var)) + +#define QTAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ + (var); \ + (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) + +/* + * Tail queue access methods. 
+ */ +#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define QTAILQ_FIRST(head) ((head)->tqh_first) +#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define QTAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +#define QTAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + +#endif /* !QEMU_SYS_QUEUE_H_ */ diff --git a/contrib/qemu/include/qemu/sockets.h b/contrib/qemu/include/qemu/sockets.h new file mode 100644 index 00000000000..c5174d76a70 --- /dev/null +++ b/contrib/qemu/include/qemu/sockets.h @@ -0,0 +1,83 @@ +/* headers to use the BSD sockets */ +#ifndef QEMU_SOCKET_H +#define QEMU_SOCKET_H + +#ifdef _WIN32 +#include <windows.h> +#include <winsock2.h> +#include <ws2tcpip.h> + +#define socket_error() WSAGetLastError() + +int inet_aton(const char *cp, struct in_addr *ia); + +#else + +#include <sys/types.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <netinet/tcp.h> +#include <arpa/inet.h> +#include <netdb.h> +#include <sys/un.h> + +#define socket_error() errno +#define closesocket(s) close(s) + +#endif /* !_WIN32 */ + +#include "qemu/option.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" + +extern QemuOptsList socket_optslist; + +/* misc helpers */ +int qemu_socket(int domain, int type, int protocol); +int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +int socket_set_cork(int fd, int v); +int socket_set_nodelay(int fd); +void qemu_set_block(int fd); +void qemu_set_nonblock(int fd); +int send_all(int fd, const void *buf, int len1); +int recv_all(int fd, void *buf, int len1, bool single_read); + +/* callback function for nonblocking connect + * valid fd on success, negative error code on failure + */ +typedef void NonBlockingConnectHandler(int fd, void *opaque); + +InetSocketAddress *inet_parse(const char *str, Error **errp); +int inet_listen_opts(QemuOpts *opts, int port_offset, Error **errp); +int inet_listen(const char *str, char *ostr, int olen, + int socktype, int port_offset, Error **errp); +int inet_connect_opts(QemuOpts *opts, Error **errp, + NonBlockingConnectHandler *callback, void *opaque); +int inet_connect(const char *str, Error **errp); +int inet_nonblocking_connect(const char *str, + NonBlockingConnectHandler *callback, + void *opaque, Error **errp); + +int inet_dgram_opts(QemuOpts *opts, Error **errp); +const char *inet_strfamily(int family); + +int unix_listen_opts(QemuOpts *opts, Error **errp); +int unix_listen(const char *path, char *ostr, int olen, Error **errp); +int unix_connect_opts(QemuOpts *opts, Error **errp, + NonBlockingConnectHandler *callback, void *opaque); +int unix_connect(const char *path, Error **errp); +int unix_nonblocking_connect(const char *str, + NonBlockingConnectHandler *callback, + void *opaque, Error **errp); + +SocketAddress *socket_parse(const char *str, Error **errp); +int socket_connect(SocketAddress *addr, Error **errp, + NonBlockingConnectHandler *callback, void *opaque); +int socket_listen(SocketAddress *addr, Error **errp); +int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp); + +/* Old, ipv4 only bits. Don't use for new code. 
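+ * New code should prefer the address-family-neutral helpers declared
+ * above, e.g. (the host:port string is illustrative):
+ *
+ *   Error *err = NULL;
+ *   int fd = inet_connect("localhost:4444", &err);
+ *   if (fd < 0) {
+ *       report and free err
+ *   }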
*/ +int parse_host_port(struct sockaddr_in *saddr, const char *str); +int socket_init(void); + +#endif /* QEMU_SOCKET_H */ diff --git a/contrib/qemu/include/qemu/thread-posix.h b/contrib/qemu/include/qemu/thread-posix.h new file mode 100644 index 00000000000..0f30dccb53c --- /dev/null +++ b/contrib/qemu/include/qemu/thread-posix.h @@ -0,0 +1,28 @@ +#ifndef __QEMU_THREAD_POSIX_H +#define __QEMU_THREAD_POSIX_H 1 +#include "pthread.h" +#include <semaphore.h> + +struct QemuMutex { + pthread_mutex_t lock; +}; + +struct QemuCond { + pthread_cond_t cond; +}; + +struct QemuSemaphore { +#if defined(__APPLE__) || defined(__NetBSD__) + pthread_mutex_t lock; + pthread_cond_t cond; + int count; +#else + sem_t sem; +#endif +}; + +struct QemuThread { + pthread_t thread; +}; + +#endif diff --git a/contrib/qemu/include/qemu/thread.h b/contrib/qemu/include/qemu/thread.h new file mode 100644 index 00000000000..c02404b9fbf --- /dev/null +++ b/contrib/qemu/include/qemu/thread.h @@ -0,0 +1,56 @@ +#ifndef __QEMU_THREAD_H +#define __QEMU_THREAD_H 1 + +#include <inttypes.h> +#include <stdbool.h> + +typedef struct QemuMutex QemuMutex; +typedef struct QemuCond QemuCond; +typedef struct QemuSemaphore QemuSemaphore; +typedef struct QemuThread QemuThread; + +#ifdef _WIN32 +#include "qemu/thread-win32.h" +#else +#include "qemu/thread-posix.h" +#endif + +#define QEMU_THREAD_JOINABLE 0 +#define QEMU_THREAD_DETACHED 1 + +void qemu_mutex_init(QemuMutex *mutex); +void qemu_mutex_destroy(QemuMutex *mutex); +void qemu_mutex_lock(QemuMutex *mutex); +int qemu_mutex_trylock(QemuMutex *mutex); +void qemu_mutex_unlock(QemuMutex *mutex); + +#define rcu_read_lock() do { } while (0) +#define rcu_read_unlock() do { } while (0) + +void qemu_cond_init(QemuCond *cond); +void qemu_cond_destroy(QemuCond *cond); + +/* + * IMPORTANT: The implementation does not guarantee that pthread_cond_signal + * and pthread_cond_broadcast can be called except while the same mutex is + * held as in the corresponding pthread_cond_wait calls! + */ +void qemu_cond_signal(QemuCond *cond); +void qemu_cond_broadcast(QemuCond *cond); +void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex); + +void qemu_sem_init(QemuSemaphore *sem, int init); +void qemu_sem_post(QemuSemaphore *sem); +void qemu_sem_wait(QemuSemaphore *sem); +int qemu_sem_timedwait(QemuSemaphore *sem, int ms); +void qemu_sem_destroy(QemuSemaphore *sem); + +void qemu_thread_create(QemuThread *thread, + void *(*start_routine)(void *), + void *arg, int mode); +void *qemu_thread_join(QemuThread *thread); +void qemu_thread_get_self(QemuThread *thread); +bool qemu_thread_is_self(QemuThread *thread); +void qemu_thread_exit(void *retval); + +#endif diff --git a/contrib/qemu/include/qemu/timer.h b/contrib/qemu/include/qemu/timer.h new file mode 100644 index 00000000000..9dd206ce7f4 --- /dev/null +++ b/contrib/qemu/include/qemu/timer.h @@ -0,0 +1,305 @@ +#ifndef QEMU_TIMER_H +#define QEMU_TIMER_H + +#include "qemu-common.h" +#include "qemu/main-loop.h" +#include "qemu/notify.h" + +/* timers */ + +#define SCALE_MS 1000000 +#define SCALE_US 1000 +#define SCALE_NS 1 + +typedef struct QEMUClock QEMUClock; +typedef void QEMUTimerCB(void *opaque); + +/* The real time clock should be used only for stuff which does not + change the virtual machine state, as it is run even if the virtual + machine is stopped. The real time clock has a frequency of 1000 + Hz. */ +extern QEMUClock *rt_clock; + +/* The virtual clock is only run during the emulation. It is stopped + when the virtual machine is stopped. 
Virtual timers use a high + precision clock, usually cpu cycles (use ticks_per_sec). */ +extern QEMUClock *vm_clock; + +/* The host clock should be use for device models that emulate accurate + real time sources. It will continue to run when the virtual machine + is suspended, and it will reflect system time changes the host may + undergo (e.g. due to NTP). The host clock has the same precision as + the virtual clock. */ +extern QEMUClock *host_clock; + +int64_t qemu_get_clock_ns(QEMUClock *clock); +int64_t qemu_clock_has_timers(QEMUClock *clock); +int64_t qemu_clock_expired(QEMUClock *clock); +int64_t qemu_clock_deadline(QEMUClock *clock); +void qemu_clock_enable(QEMUClock *clock, bool enabled); +void qemu_clock_warp(QEMUClock *clock); + +void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier); +void qemu_unregister_clock_reset_notifier(QEMUClock *clock, + Notifier *notifier); + +QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale, + QEMUTimerCB *cb, void *opaque); +void qemu_free_timer(QEMUTimer *ts); +void qemu_del_timer(QEMUTimer *ts); +void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time); +void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time); +bool qemu_timer_pending(QEMUTimer *ts); +bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time); +uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts); + +void qemu_run_timers(QEMUClock *clock); +void qemu_run_all_timers(void); +void configure_alarms(char const *opt); +void init_clocks(void); +int init_timer_alarm(void); + +int64_t cpu_get_ticks(void); +void cpu_enable_ticks(void); +void cpu_disable_ticks(void); + +static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb, + void *opaque) +{ + return qemu_new_timer(clock, SCALE_NS, cb, opaque); +} + +static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb, + void *opaque) +{ + return qemu_new_timer(clock, SCALE_MS, cb, opaque); +} + +static inline int64_t qemu_get_clock_ms(QEMUClock *clock) +{ + return qemu_get_clock_ns(clock) / SCALE_MS; +} + +static inline int64_t get_ticks_per_sec(void) +{ + return 1000000000LL; +} + +/* real time host monotonic timer */ +static inline int64_t get_clock_realtime(void) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000); +} + +/* Warning: don't insert tracepoints into these functions, they are + also used by simpletrace backend and tracepoints would cause + an infinite recursion! */ +#ifdef _WIN32 +extern int64_t clock_freq; + +static inline int64_t get_clock(void) +{ + LARGE_INTEGER ti; + QueryPerformanceCounter(&ti); + return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq); +} + +#else + +extern int use_rt_clock; + +static inline int64_t get_clock(void) +{ +#ifdef CLOCK_MONOTONIC + if (use_rt_clock) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000LL + ts.tv_nsec; + } else +#endif + { + /* XXX: using gettimeofday leads to problems if the date + changes, so it should be avoided. 
*/ + return get_clock_realtime(); + } +} +#endif + +void qemu_get_timer(QEMUFile *f, QEMUTimer *ts); +void qemu_put_timer(QEMUFile *f, QEMUTimer *ts); + +/* icount */ +int64_t cpu_get_icount(void); +int64_t cpu_get_clock(void); + +/*******************************************/ +/* host CPU ticks (if available) */ + +#if defined(_ARCH_PPC) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t retval; +#ifdef _ARCH_PPC64 + /* This reads timebase in one 64bit go and includes Cell workaround from: + http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html + */ + __asm__ __volatile__ ("mftb %0\n\t" + "cmpwi %0,0\n\t" + "beq- $-8" + : "=r" (retval)); +#else + /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */ + unsigned long junk; + __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */ + "mfspr %L0,268\n\t" /* mftb */ + "mfspr %0,269\n\t" /* mftbu */ + "cmpw %0,%1\n\t" + "bne $-16" + : "=r" (retval), "=r" (junk)); +#endif + return retval; +} + +#elif defined(__i386__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + asm volatile ("rdtsc" : "=A" (val)); + return val; +} + +#elif defined(__x86_64__) + +static inline int64_t cpu_get_real_ticks(void) +{ + uint32_t low,high; + int64_t val; + asm volatile("rdtsc" : "=a" (low), "=d" (high)); + val = high; + val <<= 32; + val |= low; + return val; +} + +#elif defined(__hppa__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int val; + asm volatile ("mfctl %%cr16, %0" : "=r"(val)); + return val; +} + +#elif defined(__ia64) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory"); + return val; +} + +#elif defined(__s390__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc"); + return val; +} + +#elif defined(__sparc__) + +static inline int64_t cpu_get_real_ticks (void) +{ +#if defined(_LP64) + uint64_t rval; + asm volatile("rd %%tick,%0" : "=r"(rval)); + return rval; +#else + /* We need an %o or %g register for this. For recent enough gcc + there is an "h" constraint for that. Don't bother with that. */ + union { + uint64_t i64; + struct { + uint32_t high; + uint32_t low; + } i32; + } rval; + asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1" + : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1"); + return rval.i64; +#endif +} + +#elif defined(__mips__) && \ + ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__)) +/* + * binutils wants to use rdhwr only on mips32r2 + * but as linux kernel emulate it, it's fine + * to use it. + * + */ +#define MIPS_RDHWR(rd, value) { \ + __asm__ __volatile__ (".set push\n\t" \ + ".set mips32r2\n\t" \ + "rdhwr %0, "rd"\n\t" \ + ".set pop" \ + : "=r" (value)); \ + } + +static inline int64_t cpu_get_real_ticks(void) +{ + /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */ + uint32_t count; + static uint32_t cyc_per_count = 0; + + if (!cyc_per_count) { + MIPS_RDHWR("$3", cyc_per_count); + } + + MIPS_RDHWR("$2", count); + return (int64_t)(count * cyc_per_count); +} + +#elif defined(__alpha__) + +static inline int64_t cpu_get_real_ticks(void) +{ + uint64_t cc; + uint32_t cur, ofs; + + asm volatile("rpcc %0" : "=r"(cc)); + cur = cc; + ofs = cc >> 32; + return cur - ofs; +} + +#else +/* The host CPU doesn't have an easily accessible cycle counter. + Just return a monotonically increasing value. This will be + totally wrong, but hopefully better than nothing. 
*/ +static inline int64_t cpu_get_real_ticks (void) +{ + static int64_t ticks = 0; + return ticks++; +} +#endif + +#ifdef CONFIG_PROFILER +static inline int64_t profile_getclock(void) +{ + return cpu_get_real_ticks(); +} + +extern int64_t qemu_time, qemu_time_start; +extern int64_t tlb_flush_time; +extern int64_t dev_time; +#endif + +#endif diff --git a/contrib/qemu/include/qemu/typedefs.h b/contrib/qemu/include/qemu/typedefs.h new file mode 100644 index 00000000000..ac9f8d41a35 --- /dev/null +++ b/contrib/qemu/include/qemu/typedefs.h @@ -0,0 +1,69 @@ +#ifndef QEMU_TYPEDEFS_H +#define QEMU_TYPEDEFS_H + +/* A load of opaque types so that device init declarations don't have to + pull in all the real definitions. */ +typedef struct QEMUTimer QEMUTimer; +typedef struct QEMUFile QEMUFile; +typedef struct QEMUBH QEMUBH; + +struct Monitor; +typedef struct Monitor Monitor; +typedef struct MigrationParams MigrationParams; + +typedef struct Property Property; +typedef struct PropertyInfo PropertyInfo; +typedef struct CompatProperty CompatProperty; +typedef struct DeviceState DeviceState; +typedef struct BusState BusState; +typedef struct BusClass BusClass; + +typedef struct AddressSpace AddressSpace; +typedef struct MemoryRegion MemoryRegion; +typedef struct MemoryRegionSection MemoryRegionSection; + +typedef struct MemoryMappingList MemoryMappingList; + +typedef struct NICInfo NICInfo; +typedef struct HCIInfo HCIInfo; +typedef struct AudioState AudioState; +typedef struct BlockDriverState BlockDriverState; +typedef struct DriveInfo DriveInfo; +typedef struct DisplayState DisplayState; +typedef struct DisplayChangeListener DisplayChangeListener; +typedef struct DisplaySurface DisplaySurface; +typedef struct PixelFormat PixelFormat; +typedef struct QemuConsole QemuConsole; +typedef struct CharDriverState CharDriverState; +typedef struct MACAddr MACAddr; +typedef struct NetClientState NetClientState; +typedef struct i2c_bus i2c_bus; +typedef struct ISABus ISABus; +typedef struct ISADevice ISADevice; +typedef struct SMBusDevice SMBusDevice; +typedef struct PCIHostState PCIHostState; +typedef struct PCIExpressHost PCIExpressHost; +typedef struct PCIBus PCIBus; +typedef struct PCIDevice PCIDevice; +typedef struct PCIExpressDevice PCIExpressDevice; +typedef struct PCIBridge PCIBridge; +typedef struct PCIEAERMsg PCIEAERMsg; +typedef struct PCIEAERLog PCIEAERLog; +typedef struct PCIEAERErr PCIEAERErr; +typedef struct PCIEPort PCIEPort; +typedef struct PCIESlot PCIESlot; +typedef struct MSIMessage MSIMessage; +typedef struct SerialState SerialState; +typedef struct PCMCIACardState PCMCIACardState; +typedef struct MouseTransformInfo MouseTransformInfo; +typedef struct uWireSlave uWireSlave; +typedef struct I2SCodec I2SCodec; +typedef struct SSIBus SSIBus; +typedef struct EventNotifier EventNotifier; +typedef struct VirtIODevice VirtIODevice; +typedef struct QEMUSGList QEMUSGList; +typedef struct SHPCDevice SHPCDevice; +typedef struct FWCfgState FWCfgState; +typedef struct PcGuestInfo PcGuestInfo; + +#endif /* QEMU_TYPEDEFS_H */ diff --git a/contrib/qemu/include/sysemu/os-posix.h b/contrib/qemu/include/sysemu/os-posix.h new file mode 100644 index 00000000000..25d0b2a73f6 --- /dev/null +++ b/contrib/qemu/include/sysemu/os-posix.h @@ -0,0 +1,52 @@ +/* + * posix specific declarations + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated 
documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_OS_POSIX_H +#define QEMU_OS_POSIX_H + +void os_set_line_buffering(void); +void os_set_proc_name(const char *s); +void os_setup_signal_handling(void); +void os_daemonize(void); +void os_setup_post(void); +int os_mlock(void); + +typedef struct timeval qemu_timeval; +#define qemu_gettimeofday(tp) gettimeofday(tp, NULL) + +#ifndef CONFIG_UTIMENSAT +#ifndef UTIME_NOW +# define UTIME_NOW ((1l << 30) - 1l) +#endif +#ifndef UTIME_OMIT +# define UTIME_OMIT ((1l << 30) - 2l) +#endif +#endif +typedef struct timespec qemu_timespec; +int qemu_utimens(const char *path, const qemu_timespec *times); + +bool is_daemonized(void); + +#endif diff --git a/contrib/qemu/include/sysemu/sysemu.h b/contrib/qemu/include/sysemu/sysemu.h new file mode 100644 index 00000000000..3caeb66eb2f --- /dev/null +++ b/contrib/qemu/include/sysemu/sysemu.h @@ -0,0 +1,200 @@ +#ifndef SYSEMU_H +#define SYSEMU_H +/* Misc. things related to the system emulator. 
*/ + +#include "qemu/typedefs.h" +#include "qemu/option.h" +#include "qemu/queue.h" +#include "qemu/timer.h" +#include "qapi-types.h" +#include "qemu/notify.h" +#include "qemu/main-loop.h" + +/* vl.c */ + +extern const char *bios_name; + +extern const char *qemu_name; +extern uint8_t qemu_uuid[]; +int qemu_uuid_parse(const char *str, uint8_t *uuid); +#define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx" + +bool runstate_check(RunState state); +void runstate_set(RunState new_state); +int runstate_is_running(void); +bool runstate_needs_reset(void); +typedef struct vm_change_state_entry VMChangeStateEntry; +typedef void VMChangeStateHandler(void *opaque, int running, RunState state); + +VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb, + void *opaque); +void qemu_del_vm_change_state_handler(VMChangeStateEntry *e); +void vm_state_notify(int running, RunState state); + +#define VMRESET_SILENT false +#define VMRESET_REPORT true + +void vm_start(void); +int vm_stop(RunState state); +int vm_stop_force_state(RunState state); + +typedef enum WakeupReason { + QEMU_WAKEUP_REASON_OTHER = 0, + QEMU_WAKEUP_REASON_RTC, + QEMU_WAKEUP_REASON_PMTIMER, +} WakeupReason; + +void qemu_system_reset_request(void); +void qemu_system_suspend_request(void); +void qemu_register_suspend_notifier(Notifier *notifier); +void qemu_system_wakeup_request(WakeupReason reason); +void qemu_system_wakeup_enable(WakeupReason reason, bool enabled); +void qemu_register_wakeup_notifier(Notifier *notifier); +void qemu_system_shutdown_request(void); +void qemu_system_powerdown_request(void); +void qemu_register_powerdown_notifier(Notifier *notifier); +void qemu_system_debug_request(void); +void qemu_system_vmstop_request(RunState reason); +int qemu_shutdown_requested_get(void); +int qemu_reset_requested_get(void); +void qemu_system_killed(int signal, pid_t pid); +void qemu_devices_reset(void); +void qemu_system_reset(bool report); + +void qemu_add_exit_notifier(Notifier *notify); +void qemu_remove_exit_notifier(Notifier *notify); + +void qemu_add_machine_init_done_notifier(Notifier *notify); + +void do_savevm(Monitor *mon, const QDict *qdict); +int load_vmstate(const char *name); +void do_delvm(Monitor *mon, const QDict *qdict); +void do_info_snapshots(Monitor *mon, const QDict *qdict); + +void qemu_announce_self(void); + +bool qemu_savevm_state_blocked(Error **errp); +void qemu_savevm_state_begin(QEMUFile *f, + const MigrationParams *params); +int qemu_savevm_state_iterate(QEMUFile *f); +void qemu_savevm_state_complete(QEMUFile *f); +void qemu_savevm_state_cancel(void); +uint64_t qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size); +int qemu_loadvm_state(QEMUFile *f); + +/* SLIRP */ +void do_info_slirp(Monitor *mon); + +typedef enum DisplayType +{ + DT_DEFAULT, + DT_CURSES, + DT_SDL, + DT_GTK, + DT_NOGRAPHIC, + DT_NONE, +} DisplayType; + +extern int autostart; + +typedef enum { + VGA_NONE, VGA_STD, VGA_CIRRUS, VGA_VMWARE, VGA_XENFB, VGA_QXL, +} VGAInterfaceType; + +extern int vga_interface_type; +#define xenfb_enabled (vga_interface_type == VGA_XENFB) +#define qxl_enabled (vga_interface_type == VGA_QXL) + +extern int graphic_width; +extern int graphic_height; +extern int graphic_depth; +extern DisplayType display_type; +extern const char *keyboard_layout; +extern int win2k_install_hack; +extern int alt_grab; +extern int ctrl_grab; +extern int smp_cpus; +extern int max_cpus; +extern int cursor_hide; +extern int graphic_rotate; +extern 
int no_quit;
+extern int no_shutdown;
+extern int semihosting_enabled;
+extern int old_param;
+extern int boot_menu;
+extern uint8_t *boot_splash_filedata;
+extern size_t boot_splash_filedata_size;
+extern uint8_t qemu_extra_params_fw[2];
+extern QEMUClock *rtc_clock;
+
+#define MAX_NODES 64
+#define MAX_CPUMASK_BITS 255
+extern int nb_numa_nodes;
+extern uint64_t node_mem[MAX_NODES];
+extern unsigned long *node_cpumask[MAX_NODES];
+
+#define MAX_OPTION_ROMS 16
+typedef struct QEMUOptionRom {
+    const char *name;
+    int32_t bootindex;
+} QEMUOptionRom;
+extern QEMUOptionRom option_rom[MAX_OPTION_ROMS];
+extern int nb_option_roms;
+
+#define MAX_PROM_ENVS 128
+extern const char *prom_envs[MAX_PROM_ENVS];
+extern unsigned int nb_prom_envs;
+
+/* pci-hotplug */
+void pci_device_hot_add(Monitor *mon, const QDict *qdict);
+int pci_drive_hot_add(Monitor *mon, const QDict *qdict, DriveInfo *dinfo);
+void do_pci_device_hot_remove(Monitor *mon, const QDict *qdict);
+
+/* generic hotplug */
+void drive_hot_add(Monitor *mon, const QDict *qdict);
+
+/* CPU hotplug */
+void qemu_register_cpu_added_notifier(Notifier *notifier);
+
+/* pcie aer error injection */
+void pcie_aer_inject_error_print(Monitor *mon, const QObject *data);
+int do_pcie_aer_inject_error(Monitor *mon,
+                             const QDict *qdict, QObject **ret_data);
+
+/* serial ports */
+
+#define MAX_SERIAL_PORTS 4
+
+extern CharDriverState *serial_hds[MAX_SERIAL_PORTS];
+
+/* parallel ports */
+
+#define MAX_PARALLEL_PORTS 3
+
+extern CharDriverState *parallel_hds[MAX_PARALLEL_PORTS];
+
+void do_usb_add(Monitor *mon, const QDict *qdict);
+void do_usb_del(Monitor *mon, const QDict *qdict);
+void usb_info(Monitor *mon, const QDict *qdict);
+
+void rtc_change_mon_event(struct tm *tm);
+
+void add_boot_device_path(int32_t bootindex, DeviceState *dev,
+                          const char *suffix);
+char *get_boot_devices_list(size_t *size);
+
+DeviceState *get_boot_device(uint32_t position);
+
+QemuOpts *qemu_get_machine_opts(void);
+
+bool usb_enabled(bool default_usb);
+
+extern QemuOptsList qemu_drive_opts;
+extern QemuOptsList qemu_chardev_opts;
+extern QemuOptsList qemu_device_opts;
+extern QemuOptsList qemu_netdev_opts;
+extern QemuOptsList qemu_net_opts;
+extern QemuOptsList qemu_global_opts;
+extern QemuOptsList qemu_mon_opts;
+
+#endif
diff --git a/contrib/qemu/include/trace.h b/contrib/qemu/include/trace.h
new file mode 100644
index 00000000000..c15f4981280
--- /dev/null
+++ b/contrib/qemu/include/trace.h
@@ -0,0 +1,6 @@
+#ifndef TRACE_H
+#define TRACE_H
+
+#include "trace/generated-tracers.h"
+
+#endif /* TRACE_H */
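
For readers unfamiliar with the timer API collected in the header above (vm_clock, qemu_new_timer_ns(), qemu_get_clock_ns(), qemu_mod_timer_ns()), the following fragment is a minimal usage sketch, not part of the patch itself. The MyDevice structure, the callback, and the 10 ms period are invented for illustration, and the include path is assumed to point at the vendored timer header; only functions declared above are used.

#include "qemu/timer.h"   /* assumed path of the timer declarations above */

/* Hypothetical device state: one periodic timer driven by vm_clock. */
typedef struct MyDevice {
    QEMUTimer *tick_timer;
} MyDevice;

static void my_device_tick(void *opaque)
{
    MyDevice *s = opaque;

    /* ... emulate one device tick here ... */

    /* Re-arm the timer 10 ms of guest-visible time from now; vm_clock
       stops while the VM is suspended, so the device stops with it. */
    qemu_mod_timer_ns(s->tick_timer,
                      qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
}

static void my_device_start(MyDevice *s)
{
    /* Allocate a nanosecond-scale timer on the virtual clock; 's' is
       passed back to the callback as the opaque pointer. */
    s->tick_timer = qemu_new_timer_ns(vm_clock, my_device_tick, s);
    qemu_mod_timer_ns(s->tick_timer,
                      qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
}

static void my_device_stop(MyDevice *s)
{
    qemu_del_timer(s->tick_timer);   /* cancel any pending expiry */
    qemu_free_timer(s->tick_timer);  /* then release the allocation */
    s->tick_timer = NULL;
}

Note that qemu_new_timer_ns() only allocates the timer; it fires only after it has been armed with qemu_mod_timer_ns() (or qemu_mod_timer() at the clock's own scale).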
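
Similarly, the run-state interfaces in sysemu/sysemu.h above (VMChangeStateHandler, qemu_add_vm_change_state_handler(), qemu_del_vm_change_state_handler()) are easiest to read with a small sketch. Again this is illustrative only and not part of the patch; the component name and callback are hypothetical, and only declarations visible above are relied on.

#include "sysemu/sysemu.h"

static VMChangeStateEntry *my_vm_state_entry;

/* Called whenever the VM starts or stops running; when 'running' is
   false, 'state' carries the RunState reason for the stop. */
static void my_vm_state_cb(void *opaque, int running, RunState state)
{
    if (running) {
        /* VM resumed: re-arm timers, restart pending I/O, etc. */
    } else {
        /* VM stopped: quiesce the component. */
    }
}

static void my_component_init(void)
{
    my_vm_state_entry = qemu_add_vm_change_state_handler(my_vm_state_cb, NULL);
}

static void my_component_exit(void)
{
    qemu_del_vm_change_state_handler(my_vm_state_entry);
}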