Linux C++: file writes become slow when a different (POSIX) thread writes to another file

Posted on 2025-02-04 04:50:19

Files are written to an SSD. Operating system: Red Hat 7.9.

The file is opened with
open(fileName, O_WRONLY|O_CREAT, S_IREAD|S_IWRITE|S_IRGRP|S_IROTH);
https://linux.die.net/man/3/open

Writing is done with the write call (https://linux.die.net/man/2/write).

Scenario 1
A single thread writes to file A

Scenario 2
3 threads write to 3 different files

All of the above files are written to the same file system (XFS).

Observation: the average write time increases in scenario 2. This happens even if I try to keep the TOTAL data write rate (i.e. the sum over all 3 threads) the same as or smaller than the scenario 1 rate.

Is this the expected behavior? If so, what is the explanation?

More info:
16-core machine with sufficient CPU and memory (i.e. they are NOT the bottleneck).
Written block size: 104 bytes (per single write).
A few result numbers (program running WITHOUT CPU binding):

Scenario 1 : 6540 writes per second. Avg time per write = 1.389 us

Scenario 2 : 1806 X 3 writes per second. Avg times per write = 1.894, 1.810, 1.881 us
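
(For reference, the aggregate data rates implied by the numbers above, at 104 bytes per write:

Scenario 1: 6540 writes/s x 104 B ≈ 680 KB/s
Scenario 2: 3 x 1806 writes/s x 104 B ≈ 563 KB/s

So the total data rate in scenario 2 is lower than in scenario 1, yet the per-write latency is higher.)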

* One other thing I noted is that write times improve when the threads are run on isolated CPUs.
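
(For context on the isolated-CPU note: "isolated CPUs" presumably means cores reserved via the isolcpus= kernel parameter; threads can then be pinned to such cores externally with taskset or in code. Below is a minimal sketch of in-code pinning, assuming Linux with g++/libstdc++ where std::thread::native_handle() returns a pthread_t; pinThreadToCpu and the core number are illustrative only and not part of the original program.)

#include <pthread.h>   // pthread_setaffinity_np (GNU extension; g++ defines _GNU_SOURCE by default)
#include <sched.h>     // cpu_set_t, CPU_ZERO, CPU_SET
#include <thread>

// Pin the given std::thread to a single CPU core; returns true on success.
bool pinThreadToCpu(std::thread& t, int cpu)
{
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(cpu, &set);
    return pthread_setaffinity_np(t.native_handle(), sizeof(set), &set) == 0;
}

(Called, for example, as pinThreadToCpu(t0, 2) right after the thread is created.)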

Minimal sample code:
Compiled with "g++ -O3 Minimal.cpp -lpthread"

The environment variable THREAD_COUNT sets the number of additional concurrent writer threads (the program always starts one, so e.g. THREAD_COUNT=2 gives the 3 threads of scenario 2), and SLEEP_TIME (the per-write pause in microseconds, default 1000) controls the write rate.


#include <sys/time.h>   // gettimeofday
#include <sys/stat.h>   // file mode constants
#include <fcntl.h>      // open
#include <unistd.h>     // write, usleep, sleep
#include <cstdio>       // snprintf, printf
#include <cstdlib>      // getenv, atoi, exit
#include <cstdint>      // int32_t, uint64_t, int64_t
#include <vector>
#include <string>
#include <iostream>
#include <thread>
#include <atomic>

using namespace std;


int64_t getMicroSecDiffFrom(const timeval& begin);

// Per-thread write statistics (one slot per writer thread): total time spent in
// write() in microseconds and the number of writes, updated by the writer and
// read/reset once per second by main().
struct Stat
{
    atomic<uint64_t> consumedTime {0};
    atomic<uint64_t> sampleCount {0};
};

Stat g_stats[10];

// Writer thread body: opens its own file and repeatedly writes a 104-byte record,
// recording each write() latency in g_stats[threadIndex].
void writeToFile(int32_t threadIndex)
{
    char fileName[128];
    snprintf(fileName, sizeof(fileName), "test_Data_Thd_%d", threadIndex);
    
    auto fd = open(fileName, O_WRONLY|O_CREAT, S_IREAD|S_IWRITE|S_IRGRP|S_IROTH);
    if(fd < 0)
    {
        cerr << "Failed to create file [" << fileName << "]. " << endl;
        exit(-1);
    }

    auto envVar = getenv("SLEEP_TIME");
    uint32_t sleepTime = 1000;
    if(envVar)
    {
        sleepTime = atoi(envVar);
    }

    char buf[104] = "dat123";
    uint64_t written = 0;
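    // Time each write() call individually and accumulate latency and count.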
    while(true)
    {       
        timeval beg;
        gettimeofday(&beg, nullptr);
        if(write(fd, buf, sizeof(buf)) != sizeof(buf))
        {
            cerr << "Failed to write. Written =" << written << endl;
            exit(-2);
        }       
        g_stats[threadIndex].consumedTime.fetch_add(getMicroSecDiffFrom(beg), memory_order_relaxed);
        g_stats[threadIndex].sampleCount.fetch_add(1, memory_order_relaxed);

        written += sizeof(buf);

        if(sleepTime > 0)
        {
            usleep(sleepTime);
        }
    }
}

int main()
{
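    // Writer thread 0 is always started; THREAD_COUNT adds that many more writers.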
    thread t0(writeToFile, 0);
    
    int32_t threadCount = 0;
    auto envVar = getenv("THREAD_COUNT");
    if(envVar)
    {
        threadCount = atoi(envVar);
    }

    vector<thread> thds;
    for(auto i = 0; i < threadCount; i++)
    {       
        thds.push_back(thread(writeToFile, i + 1));
    }

    string stat;
    int32_t round = 0;
    Stat runningAvg[10];
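    // Reporting loop: once per second print the per-thread average write latency for
    // the last interval; from round 6 onward also accumulate a running average
    // (earlier rounds are treated as warm-up).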
    while(true)
    {       
        stat.clear();
        round++;
        
        for(auto i = 0; i < (threadCount + 1); i++)
        {
            auto samples = g_stats[i].sampleCount.load(memory_order_relaxed);
            auto time = g_stats[i].consumedTime.load(memory_order_relaxed);
            if(samples > 0)
            {
                if(round > 5)
                {
                    runningAvg[i].consumedTime += time;
                    runningAvg[i].sampleCount += samples;
                }

                char buff[32];
                snprintf(buff, sizeof(buff), "%.3f(c=%lu RAvg=%.3f)\t", ((double) time) / samples, samples, 
                        ((double) runningAvg[i].consumedTime) / runningAvg[i].sampleCount);
                stat += buff;
                
                g_stats[i].sampleCount.store(0, memory_order_relaxed);
                g_stats[i].consumedTime.store(0, memory_order_relaxed);
            }

        }

        printf("\rWrite times(us) R=%d %s", round, stat.c_str());
        fflush(stdout);     
        sleep(1);
    }

    
    
    return 0;
}

// Microsecond difference between two gettimeofday() timestamps (wall-clock time).
inline int64_t getMicroSecDiff(const timeval& begin, const timeval& end)
{
    constexpr uint32_t MicroSecPerSec = 1000 * 1000;
    return (end.tv_sec - begin.tv_sec) * MicroSecPerSec + (end.tv_usec - begin.tv_usec);
}

inline int64_t getMicroSecDiffFrom(const timeval& begin)
{
    timeval end;
    gettimeofday(&end, nullptr);
    
    return getMicroSecDiff(begin, end);
}
