diff --git a/client/TracyRingBuffer.hpp b/client/TracyRingBuffer.hpp
index f9bd8763..4ada1d40 100644
--- a/client/TracyRingBuffer.hpp
+++ b/client/TracyRingBuffer.hpp
@@ -1,17 +1,17 @@
 namespace tracy
 {
 
+template<size_t Size>
 class RingBuffer
 {
 public:
-    RingBuffer( uint32_t size, int fd )
-        : m_size( size )
-        , m_fd( fd )
+    RingBuffer( int fd )
+        : m_fd( fd )
     {
         const auto pageSize = uint32_t( getpagesize() );
-        assert( size >= pageSize );
-        assert( __builtin_popcount( size ) == 1 );
-        m_mapSize = size + pageSize;
+        assert( Size >= pageSize );
+        assert( __builtin_popcount( Size ) == 1 );
+        m_mapSize = Size + pageSize;
         m_mapAddr = mmap( nullptr, m_mapSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
         if( !m_mapAddr )
         {
@@ -63,14 +63,14 @@ public:
 
     void Read( void* dst, uint64_t offset, uint64_t cnt )
     {
-        auto src = ( m_metadata->data_tail + offset ) % m_size;
-        if( src + cnt <= m_size )
+        auto src = ( m_metadata->data_tail + offset ) % Size;
+        if( src + cnt <= Size )
         {
             memcpy( dst, m_buffer + src, cnt );
         }
         else
         {
-            const auto s0 = m_size - src;
+            const auto s0 = Size - src;
             memcpy( dst, m_buffer + src, s0 );
             memcpy( (char*)dst + s0, m_buffer, cnt - s0 );
         }
@@ -106,7 +106,6 @@ private:
         std::atomic_store_explicit( (volatile std::atomic<uint64_t>*)&m_metadata->data_tail, tail, std::memory_order_release );
     }
 
-    size_t m_size;
     size_t m_mapSize;
     void* m_mapAddr;
 
diff --git a/client/TracySysTrace.cpp b/client/TracySysTrace.cpp
index a2618736..165fa033 100644
--- a/client/TracySysTrace.cpp
+++ b/client/TracySysTrace.cpp
@@ -620,7 +620,9 @@ static const char TracePipe[] = "trace_pipe";
 static std::atomic<bool> traceActive { false };
 static Thread* s_threadSampling = nullptr;
 static int s_numCpus = 0;
-static RingBuffer* s_ring = nullptr;
+
+static constexpr size_t RingBufSize = 64*1024;
+static RingBuffer<RingBufSize>* s_ring = nullptr;
 
 static int perf_event_open( struct perf_event_attr* hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags )
 {
@@ -636,7 +638,7 @@ static void SetupSampling( int64_t& samplingPeriod )
     samplingPeriod = 100*1000;
 
     s_numCpus = (int)std::thread::hardware_concurrency();
-    s_ring = (RingBuffer*)tracy_malloc( sizeof( RingBuffer ) * s_numCpus );
+    s_ring = (RingBuffer<RingBufSize>*)tracy_malloc( sizeof( RingBuffer<RingBufSize> ) * s_numCpus );
 
     perf_event_attr pe = {};
 
@@ -661,11 +663,11 @@ static void SetupSampling( int64_t& samplingPeriod )
         const int fd = perf_event_open( &pe, -1, i, -1, 0 );
         if( fd == -1 )
         {
-            for( int j=0; j<i; j++ ) s_ring[j].~RingBuffer();
+            for( int j=0; j<i; j++ ) s_ring[j].~RingBuffer<RingBufSize>();
             tracy_free( s_ring );
             return;
         }
-        new( s_ring+i ) RingBuffer( 64*1024, fd );
+        new( s_ring+i ) RingBuffer<RingBufSize>( fd );
     }
 
     s_threadSampling = (Thread*)tracy_malloc( sizeof( Thread ) );
@@ -680,7 +682,7 @@ static void SetupSampling( int64_t& samplingPeriod )
         {
             if( !s_ring[i].CheckTscCaps() )
             {
-                for( int j=0; j<s_numCpus; j++ ) s_ring[j].~RingBuffer();
+                for( int j=0; j<s_numCpus; j++ ) s_ring[j].~RingBuffer<RingBufSize>();
                 tracy_free( s_ring );
                 const char* err = "Tracy Profiler: sampling is disabled due to non-native scheduler clock. Are you running under a VM?";
                 Profiler::MessageAppInfo( err, strlen( err ) );
@@ -760,7 +762,7 @@ static void SetupSampling( int64_t& samplingPeriod )
             }
         }
 
-        for( int i=0; i<s_numCpus; i++ ) s_ring[i].~RingBuffer();
+        for( int i=0; i<s_numCpus; i++ ) s_ring[i].~RingBuffer<RingBufSize>();
         tracy_free( s_ring );
     }, nullptr );
 }