author     Etienne Dechamps <[email protected]>   2013-11-10 15:00:11 +0000
committer  Brian Behlendorf <[email protected]>   2013-11-23 15:08:29 -0800
commit     119a394ab0eee137a5198ad3fffab45fb11ef108 (patch)
tree       c47e61a340eb7ff7e9ff61a4c6674d22abd72121 /module/zfs/zil.c
parent     14cecbb159a45c37f84948c758db1009dda49c62 (diff)
Only commit the ZIL once in zpl_writepages() (msync() case).
Currently, using msync() results in the following code path:
sys_msync -> zpl_fsync -> filemap_write_and_wait_range -> zpl_writepages -> write_cache_pages -> zpl_putpage
In such a code path, zil_commit() is called as part of zpl_putpage().
This means that for each page, the write is handed to the DMU, the ZIL
is committed, and only then do we move on to the next page. As one might
imagine, this results in atrocious performance when there is a large
number of pages to write: instead of committing a batch of N writes,
we do N commits containing one page each. In some extreme cases this
makes msync() ~700 times slower than it should be, and it results in
very inefficient use of ZIL resources.
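To illustrate the shape of the problem, the old per-page pattern was
roughly the following. (This is an illustrative sketch only: the function
name is hypothetical, the exact split between zpl_putpage() and
zfs_putpage() is glossed over, and the ITOZ()/ITOZSB()/z_log/z_id
accessors are shown only for illustration.)

	static int
	putpage_old(struct page *pp, struct writeback_control *wbc, void *data)
	{
		struct inode *ip = pp->mapping->host;

		/* Hand this page's data to the DMU. */
		zfs_putpage(ip, pp, wbc);

		/* Then immediately commit the ZIL for this single page. */
		zil_commit(ITOZSB(ip)->z_log, ITOZ(ip)->z_id);

		return (0);
	}

With N dirty pages this issues N separate ZIL commits of one page each
instead of a single commit covering all of them.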
This patch fixes this issue by making sure that the requested writes
are batched and then committed only once. Unfortunately, the
implementation is somewhat non-trivial because there is no way to run
write_cache_pages in SYNC mode (so that we get all pages) without
making it wait on the writeback tag for each page.
The solution implemented here is composed of two parts:
- I added a new callback system to the ZIL, which allows the caller to
be notified when its ITX gets written to stable storage. One nice
thing is that the callback is called not only in zil_commit() but
in zil_sync() as well, which means that the caller doesn't have to
care whether the write ended up in the ZIL or the DMU: it will get
notified as soon as it's safe, period. This is an improvement over
dmu_tx_callback_register() that was used previously, which only
supports DMU writes. The rationale for this change is to allow
zpl_putpage() to be notified when a ZIL commit is completed without
having to block on zil_commit() itself (see the first sketch below this list).
- zpl_writepages() now calls write_cache_pages in non-SYNC mode, which
will prevent (1) write_cache_pages from blocking, and (2) zpl_putpage
from issuing ZIL commits. zpl_writepages() will issue the commit
itself instead of relying on zpl_putpage() to do it, thus nicely
batching the writes. Note, however, that we still have to call
write_cache_pages() again in SYNC mode because there is an edge case
documented in the implementation of write_cache_pages() whereby it
will not give us all dirty pages when running in non-SYNC mode. Thus
we need to run it at least once in SYNC mode to make sure we honor
persistence guarantees. This only happens when the pages are
modified at the same time msync() is running, which should be rare.
In most cases there won't be any additional pages and this second
call will do nothing (see the second sketch below this list).
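To make the first part above concrete, a caller-side sketch of the new
callback fields might look like the following. This is an illustration
only, not the actual zfs_putpage()/zpl_putpage() code: the two function
names here are hypothetical, while zil_itx_create(), zil_itx_assign() and
end_page_writeback() are existing interfaces.

	/*
	 * Fires once the record is on stable storage, whether that happened
	 * through zil_commit() or through the normal txg sync (zil_sync()).
	 */
	static void
	putpage_commit_cb(void *arg)
	{
		struct page *pp = arg;

		end_page_writeback(pp);		/* page is now stable */
	}

	static void
	log_page_write(zilog_t *zilog, dmu_tx_t *tx, struct page *pp,
	    size_t lrsize)
	{
		itx_t *itx;

		itx = zil_itx_create(TX_WRITE, lrsize);
		/* ... fill in the lr_write_t record for this page ... */
		itx->itx_callback = putpage_commit_cb;	/* new field */
		itx->itx_callback_data = pp;		/* new field */
		zil_itx_assign(zilog, itx, tx);
	}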
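The second part, the two-pass batching in zpl_writepages(), might be
sketched roughly as follows (simplified: error handling, locking and the
sync=always case are omitted, the function name is hypothetical, and the
ITOZ()/ITOZSB()/z_log/z_id accessors are shown only for illustration):

	static int
	writepages_batched(struct address_space *mapping,
	    struct writeback_control *wbc)
	{
		struct inode *ip = mapping->host;
		enum writeback_sync_modes sync_mode = wbc->sync_mode;
		int error;

		/*
		 * Pass 1, non-SYNC: zpl_putpage() queues an itx per page but
		 * issues no ZIL commit, and write_cache_pages() does not wait
		 * on the writeback tag.
		 */
		wbc->sync_mode = WB_SYNC_NONE;
		error = write_cache_pages(mapping, wbc, zpl_putpage, mapping);

		if (sync_mode == WB_SYNC_ALL) {
			/* One ZIL commit for the whole batch of queued itxs. */
			zil_commit(ITOZSB(ip)->z_log, ITOZ(ip)->z_id);

			/*
			 * Pass 2, SYNC: catches pages dirtied while pass 1 was
			 * running; in the common case this finds nothing.
			 */
			wbc->sync_mode = sync_mode;
			error = write_cache_pages(mapping, wbc, zpl_putpage,
			    mapping);
		}

		return (error);
	}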
Note that this change also fixes a bug related to #907 whereby calling
msync() on pages that were already handed over to the DMU in a previous
writepages() call would make msync() block until the next TXG sync
instead of returning as soon as the ZIL commit is complete. The new
callback system fixes that problem.
Signed-off-by: Richard Yao <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #1849
Closes #907
Diffstat (limited to 'module/zfs/zil.c')
-rw-r--r--   module/zfs/zil.c   25
1 file changed, 21 insertions, 4 deletions
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 3688c8a16..839afa956 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -1184,6 +1184,8 @@ zil_itx_create(uint64_t txtype, size_t lrsize)
 	itx->itx_sod = lrsize;	/* if write & WR_NEED_COPY will be increased */
 	itx->itx_lr.lrc_seq = 0;	/* defensive */
 	itx->itx_sync = B_TRUE;		/* default is synchronous */
+	itx->itx_callback = NULL;
+	itx->itx_callback_data = NULL;
 
 	return (itx);
 }
@@ -1209,6 +1211,8 @@ zil_itxg_clean(itxs_t *itxs)
 
 	list = &itxs->i_sync_list;
 	while ((itx = list_head(list)) != NULL) {
+		if (itx->itx_callback != NULL)
+			itx->itx_callback(itx->itx_callback_data);
 		list_remove(list, itx);
 		kmem_free(itx, offsetof(itx_t, itx_lr) +
 		    itx->itx_lr.lrc_reclen);
@@ -1219,6 +1223,8 @@ zil_itxg_clean(itxs_t *itxs)
 	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
 		list = &ian->ia_list;
 		while ((itx = list_head(list)) != NULL) {
+			if (itx->itx_callback != NULL)
+				itx->itx_callback(itx->itx_callback_data);
 			list_remove(list, itx);
 			kmem_free(itx, offsetof(itx_t, itx_lr) +
 			    itx->itx_lr.lrc_reclen);
@@ -1285,6 +1291,8 @@ zil_remove_async(zilog_t *zilog, uint64_t oid)
 		mutex_exit(&itxg->itxg_lock);
 	}
 	while ((itx = list_head(&clean_list)) != NULL) {
+		if (itx->itx_callback != NULL)
+			itx->itx_callback(itx->itx_callback_data);
 		list_remove(&clean_list, itx);
 		kmem_free(itx, offsetof(itx_t, itx_lr) +
 		    itx->itx_lr.lrc_reclen);
@@ -1530,15 +1538,13 @@ zil_commit_writer(zilog_t *zilog)
 	}
 
 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
-	while ((itx = list_head(&zilog->zl_itx_commit_list))) {
+	for (itx = list_head(&zilog->zl_itx_commit_list); itx != NULL;
+	    itx = list_next(&zilog->zl_itx_commit_list, itx)) {
 		txg = itx->itx_lr.lrc_txg;
 		ASSERT(txg);
 
 		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
 			lwb = zil_lwb_commit(zilog, itx, lwb);
-		list_remove(&zilog->zl_itx_commit_list, itx);
-		kmem_free(itx, offsetof(itx_t, itx_lr)
-		    + itx->itx_lr.lrc_reclen);
 	}
 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
 
@@ -1560,6 +1566,17 @@ zil_commit_writer(zilog_t *zilog)
 	if (error || lwb == NULL)
 		txg_wait_synced(zilog->zl_dmu_pool, 0);
 
+	while ((itx = list_head(&zilog->zl_itx_commit_list))) {
+		txg = itx->itx_lr.lrc_txg;
+		ASSERT(txg);
+
+		if (itx->itx_callback != NULL)
+			itx->itx_callback(itx->itx_callback_data);
+		list_remove(&zilog->zl_itx_commit_list, itx);
+		kmem_free(itx, offsetof(itx_t, itx_lr)
+		    + itx->itx_lr.lrc_reclen);
+	}
+
 	mutex_enter(&zilog->zl_lock);
 
 	/*