#ifndef XCT_H
#define XCT_H

#include "w_defines.h"

#ifdef __GNUG__
#pragma interface
#endif

#if W_DEBUG_LEVEL > 2
/* In heavily-debugged builds, anchor-related methods take an extra
 * debug-comment argument that identifies the call site. */
#define X_LOG_COMMENT_ON 1
#define ADD_LOG_COMMENT_SIG ,const char *debugmsg
#define ADD_LOG_COMMENT_USE ,debugmsg
#define LOG_COMMENT_USE(x) ,x

#else

#define X_LOG_COMMENT_ON 0
#define ADD_LOG_COMMENT_SIG
#define ADD_LOG_COMMENT_USE
#define LOG_COMMENT_USE(x)
#endif

class xct_dependent_t;

/* Per-thread flag with which a thread can turn logging off or on for the
 * operations it performs on behalf of the attached transaction. */
class xct_log_t : public smlevel_1 {
private:
    bool _xct_log_off;
public:
    NORET xct_log_t() : _xct_log_off(false) {}
    bool xct_log_is_off() { return _xct_log_off; }
    void set_xct_log_off() { _xct_log_off = true; }
    void set_xct_log_on()  { _xct_log_off = false; }
};

class lockid_t;
class sdesc_cache_t;
class xct_i;
class restart_m;
class lock_m;
class lock_core_m;
class lock_request_t;
class xct_log_switch_t;
class xct_lock_info_t;
class xct_prepare_alk_log;
class xct_prepare_fi_log;
class xct_prepare_lk_log;
class sm_quark_t;
class smthread_t;

class logrec_t;
class page_p;

/* List element carrying a store id (stid_t); used for the per-transaction
 * lists of stores to free and of load stores.  Detaches itself from its
 * list on destruction. */
class stid_list_elem_t {
public:
    stid_t   stid;
    w_link_t _link;

    stid_list_elem_t(const stid_t& theStid)
        : stid(theStid)
    {}
    ~stid_list_elem_t()
    {
        if (_link.member_of() != NULL)
            _link.detach();
    }
    static w_base_t::uint4_t link_offset()
    {
        return W_LIST_ARG(stid_list_elem_t, _link);
    }
};
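
/* Illustrative sketch (not part of the original header): stid_list_elem_t is
 * the element type of the per-transaction store lists (_storesToFree and
 * _loadStores in xct_t::xct_core below); callers normally reach those lists
 * through xct_t::AddStoreToFree / AddLoadStore rather than building elements
 * directly.
 *
 *   if (xct_t* xd = xct()) {
 *       stid_t stid;                  // id of a store, obtained elsewhere
 *       xd->AddStoreToFree(stid);     // free the store when the xct commits
 *   }
 */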

/* A transaction.  The storage manager creates one xct_t per active
 * transaction; threads attach to and detach from it, and the transaction's
 * logging, locking, 2PC, and rollback state hangs off this object (partly
 * via the embedded xct_core). */
class xct_t : public smlevel_1 {

#if USE_BLOCK_ALLOC_FOR_LOGREC
    friend class block_alloc<xct_t>;
#endif
    friend class xct_i;
    friend class smthread_t;
    friend class restart_m;
    friend class lock_m;
    friend class lock_core_m;
    friend class lock_request_t;
    friend class xct_log_switch_t;
    friend class xct_prepare_alk_log;
    friend class xct_prepare_fi_log;
    friend class xct_prepare_lk_log;
    friend class sm_quark_t;

protected:
    enum commit_t { t_normal = 0, t_lazy = 1, t_chain = 2, t_group = 4 };

public:
    typedef xct_state_t state_t;

    static xct_t* new_xct(
        sm_stats_info_t* stats = 0,
        timeout_in_ms timeout = WAIT_SPECIFIED_BY_THREAD);

    static xct_t* new_xct(
        const tid_t& tid,
        state_t s,
        const lsn_t& last_lsn,
        const lsn_t& undo_nxt,
        timeout_in_ms timeout = WAIT_SPECIFIED_BY_THREAD);
    static void destroy_xct(xct_t* xd);

    static rc_t group_commit(const xct_t* list[], int number);

    rc_t commit_free_locks();

#if defined(USE_BLOCK_ALLOC_FOR_XCT_IMPL) && (USE_BLOCK_ALLOC_FOR_XCT_IMPL==1)
public:
#else
private:
#endif
    struct xct_core;
private:
    NORET xct_t(
        xct_core* core,
        sm_stats_info_t* stats,
        const lsn_t& last_lsn,
        const lsn_t& undo_nxt);
    NORET ~xct_t();

public:

    friend ostream& operator<<(ostream&, const xct_t&);

    static int collect(vtable_t&, bool names_too);
    void vtable_collect(vtable_row_t&);
    static void vtable_collect_names(vtable_row_t&);

    state_t state() const;
    void set_timeout(timeout_in_ms t);

    timeout_in_ms timeout_c() const;

public:
    void force_readonly();
    bool forced_readonly() const;

    // Distributed transaction (external 2PC) interface.
    vote_t vote() const;
    bool is_extern2pc() const;
    rc_t enter2pc(const gtid_t& g);
    const gtid_t* gtid() const;
    const server_handle_t& get_coordinator() const;
    void set_coordinator(const server_handle_t&);
    static rc_t recover2pc(const gtid_t& g,
                           bool mayblock, xct_t*&);
    static rc_t query_prepared(int& numtids);
    static rc_t query_prepared(int numtids, gtid_t l[]);

    rc_t prepare();
    rc_t log_prepared(bool in_chkpt = false);

public:
    static void dump(ostream& o);
    static int cleanup(bool dispose_prepared = false);

    // Statistics instrumentation: a transaction is "instrumented" only if a
    // stats structure has been given to it.
    bool is_instrumented() {
        return (__stats != 0);
    }
    void give_stats(sm_stats_info_t* s) {
        w_assert1(__stats == 0);
        __stats = s;
    }
    void clear_stats() {
        memset(__stats, 0, sizeof(*__stats));
    }
    sm_stats_info_t* steal_stats() {
        sm_stats_info_t* s = __stats;
        __stats = 0;
        return s;
    }
    const sm_stats_info_t& const_stats_ref() { return *__stats; }
    rc_t commit(bool lazy = false, lsn_t* plastlsn = NULL);
    rc_t commit_as_group_member();
    rc_t rollback(const lsn_t& save_pt);
    rc_t save_point(lsn_t& lsn);
    rc_t chain(bool lazy = false);
    rc_t abort(bool save_stats = false);

protected:
    sm_stats_info_t& stats_ref() { return *__stats; }
    rc_t dispose();
    void change_state(state_t new_state);
    void set_first_lsn(const lsn_t&);
    void set_last_lsn(const lsn_t&);
    void set_undo_nxt(const lsn_t&);
    void prepare_restore_log_resv(int, int, int, int);

public:

    const lsn_t& last_lsn() const;
    const lsn_t& first_lsn() const;
    const lsn_t& undo_nxt() const;
    const logrec_t* last_log() const;
    fileoff_t get_log_space_used() const;
    rc_t wait_for_log_space(fileoff_t amt);

    static xct_t* look_up(const tid_t& tid);
    static tid_t oldest_tid();
    static tid_t youngest_tid();

    static void update_youngest_tid(const tid_t&);

    static w_base_t::uint4_t num_active_xcts();

    // Log anchors bracket multi-log-record operations so they can be
    // compensated (treated as a unit on rollback).
    const lsn_t& anchor(bool grabit = true);
    void release_anchor(bool compensate
                        ADD_LOG_COMMENT_SIG
                        );
    int compensated_op_depth() const;

    // Assert that exactly one update thread is attached while in a critical
    // section of the transaction.
    void start_crit() {
        w_assert0(update_threads() == 1);
    }
    void stop_crit() {}

    void compensate(const lsn_t&,
                    bool undoable
                    ADD_LOG_COMMENT_SIG
                    );

    void compensate_undo(const lsn_t&);

    // Enable/disable the log-space warning for this transaction.
    void log_warn_disable();
    void log_warn_resume();
    bool log_warn_is_on() const;

public:

    rc_t add_dependent(xct_dependent_t* dependent);
    rc_t remove_dependent(xct_dependent_t* dependent);
    bool find_dependent(xct_dependent_t* dependent);

    // Log-buffer handshake: a log record buffer is acquired with get_logbuf
    // and handed back with give_logbuf.
    bool is_log_on() const;
    rc_t get_logbuf(logrec_t*&, int t,
                    const page_p* p = 0);
    rc_t give_logbuf(logrec_t*, const page_p* p = 0);

    void AddStoreToFree(const stid_t& stid);
    void AddLoadStore(const stid_t& stid);

    void set_alloced() { }

    void num_extents_marked_for_deletion(
        base_stat_t& num);
public:

    void GetEscalationThresholds(
        w_base_t::int4_t& toPage,
        w_base_t::int4_t& toStore,
        w_base_t::int4_t& toVolume);
    void SetEscalationThresholds(
        w_base_t::int4_t toPage,
        w_base_t::int4_t toStore,
        w_base_t::int4_t toVolume);
    bool set_lock_cache_enable(bool enable);
    bool lock_cache_enabled();

protected:

    // Per-thread caches (lock hierarchy, store-descriptor cache, log state)
    // are stolen from and stashed back into the transaction as threads
    // attach and detach.
    static lockid_t* new_lock_hierarchy();
    static sdesc_cache_t* new_sdesc_cache_t();
    static xct_log_t* new_xct_log_t();
    void steal(lockid_t*&, sdesc_cache_t*&, xct_log_t*&);
    void stash(lockid_t*&, sdesc_cache_t*&, xct_log_t*&);

    void attach_thread();
    void detach_thread();

    lockid_t* lock_info_hierarchy() const {
        return me()->lock_hierarchy();
    }
public:

    sdesc_cache_t* sdesc_cache() const;

protected:

    switch_t set_log_state(switch_t s);
    void restore_log_state(switch_t s);

public:
    concurrency_t get_lock_level();
    void lock_level(concurrency_t l);

    int num_threads();
    rc_t check_one_thread_attached() const;
    int attach_update_thread();
    void detach_update_thread();
    int update_threads() const;

protected:

    w_rc_t lockblock(timeout_in_ms timeout);
    void lockunblock();
    const w_base_t::int4_t* GetEscalationThresholdsArray();

    rc_t check_lock_totals(int nex,
                           int nix, int nsix, int) const;
    rc_t obtain_locks(lock_mode_t mode,
                      int nlks, const lockid_t* l);
    rc_t obtain_one_lock(lock_mode_t mode,
                         const lockid_t& l);

    xct_lock_info_t* lock_info() const;

public:

    // Mutex protecting the global list of transactions (_xlist).
    static w_rc_t acquire_xlist_mutex();
    static void release_xlist_mutex();
    static void assert_xlist_mutex_not_mine();
    static void assert_xlist_mutex_is_mine();
    static bool xlist_mutex_is_mine();

    void force_nonblocking();

protected:

    w_link_t _xlink;
    static w_descend_list_t<xct_t, queue_based_lock_t, tid_t> _xlist;
    void put_in_order();
private:
    static queue_based_lock_t _xlist_mutex;

    sm_stats_info_t* __stats;
    lockid_t* __saved_lockid_t;
    sdesc_cache_t* __saved_sdesc_cache_t;
    xct_log_t* __saved_xct_log_t;

    static tid_t _nxt_tid;

    static tid_t _oldest_tid;

    tid_t _tid;

public:
    void acquire_1thread_xct_mutex() const;
    void release_1thread_xct_mutex() const;
    bool is_1thread_log_mutex_mine() const;

private:
    void acquire_1thread_log_mutex();
    void release_1thread_log_mutex();
    void assert_1thread_log_mutex_free() const;
private:
    bool is_1thread_xct_mutex_mine() const;
    void assert_1thread_xct_mutex_free() const;

    rc_t _abort();
    rc_t _commit(w_base_t::uint4_t flags,
                 lsn_t* plastlsn = NULL);

protected:

    switch_t set_log_state(switch_t s, bool& nested);
    void restore_log_state(switch_t s, bool nested);

private:
    bool one_thread_attached() const;

    void _compensate(const lsn_t&, bool undoable = false);

    w_base_t::int4_t escalationThresholds[lockid_t::NUMLEVELS-1];
public:
    void SetDefaultEscalationThresholds();

    void ClearAllStoresToFree();
    void FreeAllStoresToFree();
    rc_t PrepareLogAllStoresToFree();
    void DumpStoresToFree();
    rc_t ConvertAllLoadStoresToRegularStores();
    void ClearAllLoadStores();

    ostream& dump_locks(ostream&) const;

private:

    static void xct_stats(
        u_long& begins,
        u_long& commits,
        u_long& aborts,
        bool reset);

    w_rc_t _flush_logbuf();
    w_rc_t _sync_logbuf(bool block = true);
    void _teardown(bool is_chaining);

#if defined(USE_BLOCK_ALLOC_FOR_XCT_IMPL) && (USE_BLOCK_ALLOC_FOR_XCT_IMPL==1)
public:
#else
private:
#endif

    /* State shared by all threads attached to this transaction. */
    struct xct_core
    {
        xct_core(tid_t const& t, state_t s, timeout_in_ms timeout);
        ~xct_core();

        tid_t                   _tid;
        timeout_in_ms           _timeout;
        bool                    _warn_on;
        xct_lock_info_t*        _lock_info;

        // Whether to cache lock requests to avoid re-requesting held locks.
        bool                    _lock_cache_enable;

        // Serializes the threads attached to this transaction.
        queue_based_lock_t      _1thread_xct;

        volatile int            _updating_operations;

        volatile int            _threads_attached;

        pthread_cond_t          _waiters_cond;
        mutable pthread_mutex_t _waiters_mutex;

        state_t                 _state;
        bool                    _forced_readonly;
        vote_t                  _vote;
        gtid_t*                 _global_tid;
        server_handle_t*        _coord_handle;
        bool                    _read_only;

        // Stores to be freed when the transaction ends.
        w_list_t<stid_list_elem_t,queue_based_lock_t> _storesToFree;

        // Load stores to be converted to regular stores at commit.
        w_list_t<stid_list_elem_t,queue_based_lock_t> _loadStores;

        volatile int            _xct_ended;   // used for self-checking (assertions) only
        bool                    _xct_aborting;
    };

private:

    // Serializes access to this transaction's log state.
    mutable queue_based_lock_t _1thread_log;

    lsn_t _first_lsn;
    lsn_t _last_lsn;
    lsn_t _undo_nxt;

    w_list_t<xct_dependent_t,queue_based_lock_t> _dependent_list;

    static lockid_t::name_space_t convert(concurrency_t cc);
    static concurrency_t convert(lockid_t::name_space_t n);

    logrec_t* _last_log;    // last log record generated by this xct
    logrec_t* _log_buf;

    // Log-space accounting for this transaction.
    fileoff_t _log_bytes_rsvd;
    fileoff_t _log_bytes_ready;
    fileoff_t _log_bytes_used;
    fileoff_t _log_bytes_used_fwd;

    fileoff_t _log_bytes_reserved_space;

    bool _rolling_back;

    bool should_consume_rollback_resv(int t) const;
    bool should_reserve_for_rollback(int t) const {
        return ! should_consume_rollback_resv(t);
    }

#if CHECK_NESTING_VARIABLES

    volatile int _acquire_1thread_log_depth;
public:
    void inc_acquire_1thread_log_depth() { _acquire_1thread_log_depth++; }
    void dec_acquire_1thread_log_depth() { --_acquire_1thread_log_depth; }
    int acquire_1thread_log_depth() const {
        return _acquire_1thread_log_depth;
    }
#else
public:
    void inc_acquire_1thread_log_depth() { }
    void dec_acquire_1thread_log_depth() { }
    int acquire_1thread_log_depth() const { return 0; }
#endif
private:

    volatile int _in_compensated_op;

    lsn_t _anchor;

    xct_core* _core;

public:
    bool rolling_back() const { return _rolling_back; }
#if W_DEBUG_LEVEL > 2
private:
    bool _had_error;
public:

    void set_error_encountered() { _had_error = true; }
    bool error_encountered() const { return _had_error; }
#else
    void set_error_encountered() {}
    bool error_encountered() const { return false; }
#endif
    tid_t tid() const {
        w_assert1(_tid == _core->_tid);
        return _tid;
    }
};
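
/* Illustrative sketch (not part of the original header): the basic lifecycle
 * of a transaction through the static construction/destruction interface
 * above.  The work in the middle is hypothetical and error paths are
 * simplified.
 *
 *   rc_t run_one_transaction()
 *   {
 *       xct_t* xd = xct_t::new_xct();        // begin; default timeout
 *       w_assert1(xd != NULL);
 *
 *       rc_t rc = do_updates();              // hypothetical logged work
 *       if (rc.is_error()) {
 *           W_COERCE(xd->abort());           // undo everything
 *       } else {
 *           rc = xd->commit();               // normal (non-lazy) commit
 *       }
 *       xct_t::destroy_xct(xd);              // free the xct_t object
 *       return rc;
 *   }
 */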

/* Scoped guard that releases the attached transaction's log anchor (and
 * optionally compensates) when it goes out of scope. */
class auto_release_anchor_t {
    bool   _compensate;
    xct_t* _xct;
public:
    auto_release_anchor_t(bool and_compensate)
        : _compensate(and_compensate), _xct(xct())
    {}
    ~auto_release_anchor_t()
    {
        _xct->release_anchor(_compensate LOG_COMMENT_USE("auto_release_anchor_t"));
    }
};
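
/* Illustrative sketch (not part of the original header): pairing anchor()
 * with auto_release_anchor_t so the anchor is released on every exit path.
 * The logged actions are hypothetical.
 *
 *   rc_t do_compound_update()
 *   {
 *       xct_t* xd = xct();                      // currently attached xct
 *       if (xd) {
 *           const lsn_t& anchor_lsn = xd->anchor();  // begin a compensable group
 *           auto_release_anchor_t guard(true);       // compensate on release
 *           W_DO(log_first_action());           // hypothetical logged actions;
 *           W_DO(log_second_action());          // anchor_lsn could be used for
 *                                               // rollback on error
 *       }                                       // guard releases the anchor here
 *       return RCOK;
 *   }
 */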

/* Scoped guard that takes a savepoint on construction and rolls the attached
 * transaction back to it on destruction unless ok() was called. */
class auto_rollback_t {
private:
    xct_t*      _xd;
    lsn_t       _save_pt;
    bool        _roll;
    static int  _count;
    int         _test;
    int         _line;
    const char* _file;
public:

    // Error-injection hook: every _test-th instance pretends to run out of
    // log space.
    w_rc_t test(int x) {
        _test = x;
        if (_test && (_count % _test == 0))
            return RC(smlevel_0::eOUTOFLOGSPACE);
        return RCOK;
    }

#define AUTO_ROLLBACK_work auto_rollback_t work(__LINE__, __FILE__);
    auto_rollback_t(int line, const char* file)
        : _xd(xct()), _roll(true), _test(0),
          _line(line), _file(file)
    {
        _count++;
        if (_xd) {
            W_COERCE(_xd->save_point(_save_pt));
        }
    }
    void ok() { _roll = false; }

    ~auto_rollback_t() {
        if (_test && (_count % _test == 0)) _roll = true;
        if (_roll && _xd) {
            _xd->set_error_encountered();
            W_COERCE(_xd->rollback(_save_pt));
            INC_TSTAT(internal_rollback_cnt);
#if 0 && W_DEBUG_LEVEL > 0
            cerr << "Internal rollback to " << _save_pt
                 << " from " << _line
                 << " " << _file
                 << endl;
#endif
        }
    }
};
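
/* Illustrative sketch (not part of the original header): using the
 * AUTO_ROLLBACK_work macro so a partial update is rolled back to the
 * savepoint unless the function reaches work.ok().  The update calls are
 * hypothetical.
 *
 *   rc_t update_two_records()
 *   {
 *       AUTO_ROLLBACK_work             // declares auto_rollback_t work(...)
 *       W_DO(update_first());          // hypothetical operations; an error
 *       W_DO(update_second());         // return here triggers the rollback
 *       work.ok();                     // success: keep the changes
 *       return RCOK;
 *   }
 */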

/* X_DO: like W_DO, but on error rolls the attached transaction back to the
 * given anchor and releases the anchor before returning the error. */
#if X_LOG_COMMENT_ON
#define X_DO1(x,anchor,line)                                   \
{                                                              \
    w_rc_t __e = (x);                                          \
    if (__e.is_error()) {                                      \
        w_assert3(xct());                                      \
        W_COERCE(xct()->rollback(anchor));                     \
        xct()->release_anchor(true LOG_COMMENT_USE("X_DO1"));  \
        return RC_AUGMENT(__e);                                \
    }                                                          \
}
#define to_string(x) # x
#define X_DO(x,anchor) X_DO1(x,anchor, to_string(x))

#else

#define X_DO(x,anchor)                                         \
{                                                              \
    w_rc_t __e = (x);                                          \
    if (__e.is_error()) {                                      \
        w_assert3(xct());                                      \
        W_COERCE(xct()->rollback(anchor));                     \
        xct()->release_anchor(true LOG_COMMENT_USE("X_DO"));   \
        return RC_AUGMENT(__e);                                \
    }                                                          \
}
#endif
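
/* Illustrative sketch (not part of the original header): X_DO inside an
 * anchored operation, so any failing step rolls back to the anchor.  The
 * individual steps are hypothetical, and the success-path release mirrors
 * the call made by the macro itself.
 *
 *   rc_t anchored_operation()
 *   {
 *       w_assert1(xct());
 *       lsn_t anchor = xct()->anchor();   // grab an anchor for this group
 *       X_DO(step_one(), anchor);         // on error: rollback, release, return
 *       X_DO(step_two(), anchor);
 *       xct()->release_anchor(true LOG_COMMENT_USE("anchored_operation"));
 *       return RCOK;
 *   }
 */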

/* Scoped guard that switches logging on or off for the attached transaction
 * and restores the previous state when it goes out of scope. */
class xct_log_switch_t : public smlevel_0 {
    switch_t old_state;
public:

    NORET xct_log_switch_t(switch_t s) : old_state(OFF)
    {
        if (smlevel_1::log) {
            INC_TSTAT(log_switches);
            if (xct()) {
                old_state = xct()->set_log_state(s);
            }
        }
    }

    NORET ~xct_log_switch_t() {
        if (smlevel_1::log) {
            if (xct()) {
                xct()->restore_log_state(old_state);
            }
        }
    }
};

inline
bool xct_t::is_log_on() const {
    return (me()->xct_log()->xct_log_is_off() == false);
}
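
/* Illustrative sketch (not part of the original header): suppressing logging
 * for a scope, e.g. while building a temporary structure that need not be
 * recoverable.  The body of the scope is hypothetical; OFF is assumed to be
 * the smlevel_0 switch_t value used in the constructor above.
 *
 *   {
 *       xct_log_switch_t toggle(smlevel_0::OFF);   // logging off in this scope
 *       W_DO(populate_temp_pages());               // hypothetical unlogged work
 *   }   // previous log state restored here
 */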

/* Iterator over the global list of transactions.  If constructed with
 * locked_accesses == true, it holds the transaction-list mutex for its
 * lifetime (or until never_mind() is called). */
class xct_i {
public:

    // True iff this thread currently holds the transaction-list mutex.
    bool locked_by_me() const {
        if (xct_t::xlist_mutex_is_mine()) {
            W_IFDEBUG1(if (_may_check) w_assert1(_locked);)
            return true;
        }
        return false;
    }

    // Release the transaction-list mutex early, before destruction.
    void never_mind() {
        if (_locked && locked_by_me()) {
            *(const_cast<bool*>(&_locked)) = false;
            xct_t::release_xlist_mutex();
        }
    }

    xct_t* curr() const { return unsafe_iterator.curr(); }

    xct_t* next() { return unsafe_iterator.next(); }

    static bool init_locked(bool lockit)
    {
        if (lockit) {
            W_COERCE(xct_t::acquire_xlist_mutex());
        }
        return lockit;
    }

    NORET xct_i(bool locked_accesses)
        : _locked(init_locked(locked_accesses)),
          _may_check(locked_accesses),
          unsafe_iterator(xct_t::_xlist)
    {
        w_assert1(_locked == locked_accesses);
        _check(_locked);
    }

    NORET ~xct_i() {
        if (locked_by_me()) {
            _check(true);
            never_mind();
            _check(false);
        }
    }

private:
    void _check(bool b) const {
        if (!_may_check) return;
        if (b) xct_t::assert_xlist_mutex_is_mine();
        else   xct_t::assert_xlist_mutex_not_mine();
    }

    const bool _locked;
    const bool _may_check;
    w_list_i<xct_t,queue_based_lock_t> unsafe_iterator;

    // not implemented: copying an iterator is not allowed
    xct_i(const xct_i&);
    xct_i& operator=(const xct_i&);
};
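
/* Illustrative sketch (not part of the original header): walking the list of
 * active transactions under the list mutex and printing each one via
 * xct_t's operator<<.
 *
 *   void print_active_xcts(ostream& o)
 *   {
 *       xct_i it(true);                  // true: hold the xlist mutex
 *       while (xct_t* xd = it.next()) {
 *           o << *xd << endl;
 *       }
 *   }   // mutex released by ~xct_i
 */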

inline
xct_t::state_t
xct_t::state() const
{
    return _core->_state;
}

/* Owns a transaction for the lifetime of the object: creates one on
 * construction and, unless it has already ended, aborts it on destruction. */
class xct_auto_abort_t : public smlevel_1 {
public:
    xct_auto_abort_t() : _xct(xct_t::new_xct()) {}
    ~xct_auto_abort_t() {
        switch (_xct->state()) {
        case smlevel_1::xct_ended:
            // already committed or aborted; nothing to do
            break;
        case smlevel_1::xct_active:
        case smlevel_1::xct_freeing_space:
        case smlevel_1::xct_committing:
            W_COERCE(_xct->abort());
            break;
        default:
            cerr << "unexpected xct state: " << _xct->state() << endl;
            W_FATAL(eINTERNAL);
        }
        xct_t::destroy_xct(_xct);
    }
    rc_t commit() {
        W_DO(_xct->commit());
        return RCOK;
    }
    rc_t abort() { W_DO(_xct->abort()); return RCOK; }

private:
    xct_t* _xct;
};
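
/* Illustrative sketch (not part of the original header): running a unit of
 * work under xct_auto_abort_t, so an early error return leaves the
 * transaction aborted rather than dangling.  The work itself is hypothetical.
 *
 *   rc_t run_unit_of_work()
 *   {
 *       xct_auto_abort_t guard;        // begins a transaction
 *       W_DO(do_the_work());           // on error, ~xct_auto_abort_t aborts
 *       W_DO(guard.commit());          // explicit commit on success
 *       return RCOK;
 *   }
 */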

inline
bool
operator>(const xct_t& x1, const xct_t& x2)
{
    return (x1.tid() > x2.tid());
}

inline void
xct_t::SetEscalationThresholds(w_base_t::int4_t toPage,
                               w_base_t::int4_t toStore,
                               w_base_t::int4_t toVolume)
{
    if (toPage != dontModifyThreshold)
        escalationThresholds[2] = toPage;

    if (toStore != dontModifyThreshold)
        escalationThresholds[1] = toStore;

    if (toVolume != dontModifyThreshold)
        escalationThresholds[0] = toVolume;
}

inline void
xct_t::SetDefaultEscalationThresholds()
{
    SetEscalationThresholds(smlevel_0::defaultLockEscalateToPageThreshold,
                            smlevel_0::defaultLockEscalateToStoreThreshold,
                            smlevel_0::defaultLockEscalateToVolumeThreshold);
}

inline void
xct_t::GetEscalationThresholds(w_base_t::int4_t& toPage,
                               w_base_t::int4_t& toStore,
                               w_base_t::int4_t& toVolume)
{
    toPage = escalationThresholds[2];
    toStore = escalationThresholds[1];
    toVolume = escalationThresholds[0];
}

inline const w_base_t::int4_t*
xct_t::GetEscalationThresholdsArray()
{
    return escalationThresholds;
}
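
/* Illustrative sketch (not part of the original header): passing
 * dontModifyThreshold to change only the page threshold of the attached
 * transaction.  The value 100 is arbitrary, and it is assumed here that
 * dontModifyThreshold is, like the default thresholds above, a member of
 * smlevel_0.
 *
 *   if (xct_t* xd = xct()) {
 *       xd->SetEscalationThresholds(100,                              // toPage
 *                                   smlevel_0::dontModifyThreshold,   // keep toStore
 *                                   smlevel_0::dontModifyThreshold);  // keep toVolume
 *   }
 */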

inline
xct_t::vote_t
xct_t::vote() const
{
    return _core->_vote;
}

inline
const lsn_t&
xct_t::last_lsn() const
{
    return _last_lsn;
}

inline
void
xct_t::set_last_lsn(const lsn_t& l)
{
    _last_lsn = l;
}

inline
const lsn_t&
xct_t::first_lsn() const
{
    return _first_lsn;
}

inline
void
xct_t::set_first_lsn(const lsn_t& l)
{
    _first_lsn = l;
}

inline
const lsn_t&
xct_t::undo_nxt() const
{
    return _undo_nxt;
}

inline
void
xct_t::set_undo_nxt(const lsn_t& l)
{
    _undo_nxt = l;
}

inline
const logrec_t*
xct_t::last_log() const
{
    return _last_log;
}

inline
bool
xct_t::forced_readonly() const
{
    return _core->_forced_readonly;
}

// A transaction is an external 2PC participant iff it has a global tid.
inline bool
xct_t::is_extern2pc() const
{
    return _core->_global_tid != 0;
}

inline
const gtid_t*
xct_t::gtid() const
{
    return _core->_global_tid;
}
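
/* Illustrative sketch (not part of the original header): enrolling the
 * attached transaction in external two-phase commit and preparing when the
 * coordinator asks.  The gtid value and the surrounding protocol are
 * hypothetical.
 *
 *   rc_t enroll_in_global_xct(const gtid_t& g)
 *   {
 *       xct_t* xd = xct();
 *       w_assert1(xd);
 *       W_DO(xd->enter2pc(g));       // now is_extern2pc() returns true
 *       // ... do the work, then, when the coordinator asks:
 *       W_DO(xd->prepare());         // the vote is available via xd->vote()
 *       return RCOK;
 *   }
 */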

#endif /* XCT_H */