GPU context registration.
Commit 3c00ce0958 (parent ce35009c63) in https://github.com/wolfpld/tracy

TracyOpenGL.hpp (new file, 48 lines)
@@ -0,0 +1,48 @@
#ifndef __TRACYOPENGL_HPP__
#define __TRACYOPENGL_HPP__

#ifdef TRACY_ENABLE

#include <atomic>

#include "client/TracyProfiler.hpp"

namespace tracy
{

extern std::atomic<uint16_t> s_gpuCtxCounter;

template<int Num>
class GpuCtx
{
public:
    GpuCtx()
        : m_context( s_gpuCtxCounter.fetch_add( 1, std::memory_order_relaxed ) )
    {
        // Reserve the pool of OpenGL timestamp query objects for this context.
        glGenQueries( Num, m_query );

        // Sample the GPU and CPU clocks back to back so the server can
        // correlate the two timelines.
        int64_t tgpu;
        glGetInteger64v( GL_TIMESTAMP, &tgpu );
        int64_t tcpu = Profiler::GetTime();

        // Announce the new GPU context to the profiler queue.
        Magic magic;
        auto& token = s_token.ptr;
        auto& tail = token->get_tail_index();
        auto item = token->enqueue_begin<moodycamel::CanAlloc>( magic );
        item->hdr.type = QueueType::GpuNewContext;
        item->gpuNewContext.cputime = tcpu;
        item->gpuNewContext.gputime = tgpu;
        item->gpuNewContext.context = m_context;
        tail.store( magic + 1, std::memory_order_release );
    }

private:
    unsigned int m_query[Num];
    uint16_t m_context;
};

}

#endif

#endif

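A minimal usage sketch (not part of this commit): the template presumably gets instantiated once per OpenGL context, after that context is current and an OpenGL loader header has been included ahead of TracyOpenGL.hpp. The query-pool size of 64 and the helper name are illustrative assumptions.

// Hypothetical application-side setup; assumes TRACY_ENABLE is defined and an
// OpenGL header providing glGenQueries/glGetInteger64v was included first.
#include "TracyOpenGL.hpp"

void InitGpuProfiling()
{
    // Constructing the object creates the timestamp query pool and enqueues a
    // GpuNewContext message pairing the current GPU and CPU clock readings.
    static tracy::GpuCtx<64> s_gpuCtx;
    (void) s_gpuCtx;
}
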
@@ -98,6 +98,7 @@ static InitTimeWrapper init_order(101) s_initTime { Profiler::GetTime() };
static RPMallocInit init_order(102) s_rpmalloc_init;
moodycamel::ConcurrentQueue<QueueItem> init_order(103) s_queue( QueuePrealloc );
std::atomic<uint32_t> init_order(104) s_lockCounter( 0 );
std::atomic<uint16_t> init_order(104) s_gpuCtxCounter( 0 );

#ifdef TRACY_COLLECT_THREAD_NAMES
struct ThreadNameData;

@@ -10,6 +10,7 @@
#include "../common/tracy_lz4.hpp"
#include "../common/TracyQueue.hpp"
#include "../common/TracyAlloc.hpp"
#include "../common/TracySystem.hpp"

#if defined _MSC_VER || defined __CYGWIN__
# include <intrin.h>

@@ -29,6 +29,7 @@ enum class QueueType : uint8_t
    PlotName,
    Message,
    MessageLiteral,
    GpuNewContext,
    NUM_TYPES
};

@@ -138,6 +139,13 @@ struct QueueMessage
    uint64_t text;      // ptr
};

struct QueueGpuNewContext
{
    int64_t cputime;
    int64_t gputime;
    uint16_t context;
};

struct QueueHeader
{
    union
@@ -165,6 +173,7 @@ struct QueueItem
        QueueLockMark lockMark;
        QueuePlotData plotData;
        QueueMessage message;
        QueueGpuNewContext gpuNewContext;
    };
};

@@ -194,6 +203,7 @@ static const size_t QueueDataSize[] = {
    sizeof( QueueHeader ) + sizeof( QueueStringTransfer ),  // plot name
    sizeof( QueueHeader ) + sizeof( QueueMessage ),
    sizeof( QueueHeader ) + sizeof( QueueMessage ),         // literal
    sizeof( QueueHeader ) + sizeof( QueueGpuNewContext ),
};

static_assert( QueueItemSize == 32, "Queue item size not 32 bytes" );

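For context, a standalone sketch (not part of the commit) of why the new payload keeps the queue item at the asserted 32 bytes; the 1-byte header tag and 8-byte union alignment are assumptions based on the surrounding layout.

#include <cstdint>

// Hypothetical mirror of the payload added above, used only to check its size.
struct GpuNewContextPayload
{
    int64_t cputime;    // 8 bytes
    int64_t gputime;    // 8 bytes
    uint16_t context;   // 2 bytes, padded to 24 total by int64_t alignment
};

static_assert( sizeof( GpuNewContextPayload ) == 24, "payload occupies 24 bytes" );
// With a 1-byte type tag padded up to the union's 8-byte alignment, the
// combined item is 8 + 24 = 32 bytes, matching the static_assert above.
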
@@ -112,6 +112,11 @@ struct ThreadData
    Vector<MessageData*> messages;
};

struct GpuCtxData
{
    int64_t timeDiff;
};

struct LockMap
{
    uint32_t srcloc;

@@ -588,6 +588,9 @@ void View::Process( const QueueItem& ev )
    case QueueType::MessageLiteral:
        ProcessMessageLiteral( ev.message );
        break;
    case QueueType::GpuNewContext:
        ProcessGpuNewContext( ev.gpuNewContext );
        break;
    case QueueType::Terminate:
        m_terminate = true;
        break;

@@ -844,6 +847,15 @@ void View::ProcessMessageLiteral( const QueueMessage& ev )
    InsertMessageData( msg, ev.thread );
}

void View::ProcessGpuNewContext( const QueueGpuNewContext& ev )
{
    assert( ev.context == m_gpuData.size() );
    auto gpu = m_slab.Alloc<GpuCtxData>();
    // Offset that places raw GPU timestamps on the profiler's CPU time axis.
    gpu->timeDiff = int64_t( ev.cputime * m_timerMul - ev.gputime );
    std::lock_guard<std::mutex> lock( m_lock );
    m_gpuData.push_back( gpu );
}

void View::CheckString( uint64_t ptr )
{
    if( m_strings.find( ptr ) != m_strings.end() ) return;

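The stored timeDiff presumably lets later GPU timestamps be projected onto the profiler's CPU time axis; a minimal sketch of that mapping with a hypothetical helper (not part of this commit):

#include <cstdint>

// Hypothetical helper: map a raw GL_TIMESTAMP value onto the CPU timeline
// using the per-context offset captured at registration time.
inline int64_t GpuToCpuTime( int64_t gpuTimestamp, int64_t timeDiff )
{
    return gpuTimestamp + timeDiff;
}
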
@@ -67,6 +67,7 @@ private:
    void ProcessPlotData( const QueuePlotData& ev );
    void ProcessMessage( const QueueMessage& ev );
    void ProcessMessageLiteral( const QueueMessage& ev );
    void ProcessGpuNewContext( const QueueGpuNewContext& ev );

    void CheckString( uint64_t ptr );
    void CheckThreadString( uint64_t id );

@@ -158,6 +159,7 @@ private:
    Vector<PlotData*> m_plots;
    Vector<MessageData*> m_messages;
    Vector<TextData*> m_textData;
    Vector<GpuCtxData*> m_gpuData;
    std::unordered_map<uint64_t, const char*> m_strings;
    std::unordered_map<uint64_t, const char*> m_threadNames;
    std::unordered_map<uint64_t, SourceLocation> m_sourceLocation;