我正在尝试并行运行一个函数,但它被卡住并无限运行,直到我强行中断该进程

发布于 2025-01-09 03:36:44 字数 1575 浏览 0 评论 0原文

当我使用 for 循环顺序运行此函数时,它工作得很好,但是与多处理并行执行它会使其连续运行而不会停止。我已经尝试过 Pool 和 Process 方法,但都给出了相同的问题,找不到我混淆的地方。

import numpy as np
import pandas as pd
import time
import multiprocessing as mp
import io

from google.colab import drive
drive.mount('/content/drive')
from google.colab import files
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

up = files.upload()
data = pd.read_csv(io.BytesIO(up['heart.csv']))

#### SEQUENTIAL PROGRAM - This part runs smoothly
            
s1 = time.time()
            
def kmeans(n):
  sc = StandardScaler()
  X = sc.fit_transform(pd.get_dummies(data, drop_first = True))
  ss = []
  km = KMeans(n_clusters=n, random_state=1)
  km = km.fit(X)
  ss.append(km.inertia_)
  print(f"Process {n} is executing")

  return ss
            
for i in range(1, 50):
  kmeans(i)
            
s2 = time.time()
            
print(f"took {s2-s1}s to finish")
        
### Using Multiprocessing- Process() - Runs continuously without stopping
        
tic2 = time.time()
        
process_list = []
for i in range(1, 50):
  p =  mp.Process(target= kmeans, args = [i])
  p.start()
  process_list.append(p)
        
for process in process_list:
  process.join()
        
 toc2 = time.time()
        
 print('Done in {:.4f} seconds'.format(toc2-tic2))
    
    ### Using Multiprocessing- Pool() - Runs continuously without stopping
    
 tic1 = time.time()
    
 pool = mp.Pool()
 pool.map(kmeans, range(1,50))
 pool.close()
    
 toc1 = time.time()
    
 print('Done in {:.4f} seconds'.format(toc1-tic1))

When I run this function sequentially with a for-loop, it works just fine but doing it in parallel with multiprocessing makes it run continuously without stopping. I have tried both Pool and Process methods but both give the same problem, can't find where I getting it mixed up.

import numpy as np
import pandas as pd
import time
import multiprocessing as mp
import io

from google.colab import drive
drive.mount('/content/drive')
from google.colab import files
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

up = files.upload()
data = pd.read_csv(io.BytesIO(up['heart.csv']))

#### SEQUENTIAL PROGRAM - This part runs smoothly
            
s1 = time.time()
            
def kmeans(n):
  sc = StandardScaler()
  X = sc.fit_transform(pd.get_dummies(data, drop_first = True))
  ss = []
  km = KMeans(n_clusters=n, random_state=1)
  km = km.fit(X)
  ss.append(km.inertia_)
  print(f"Process {n} is executing")

  return ss
            
for i in range(1, 50):
  kmeans(i)
            
s2 = time.time()
            
print(f"took {s2-s1}s to finish")
        
### Using Multiprocessing- Process() - Runs continuously without stopping
        
tic2 = time.time()
        
process_list = []
for i in range(1, 50):
  p =  mp.Process(target= kmeans, args = [i])
  p.start()
  process_list.append(p)
        
for process in process_list:
  process.join()
        
 toc2 = time.time()
        
 print('Done in {:.4f} seconds'.format(toc2-tic2))
    
    ### Using Multiprocessing- Pool() - Runs continuously without stopping
    
 tic1 = time.time()
    
 pool = mp.Pool()
 pool.map(kmeans, range(1,50))
 pool.close()
    
 toc1 = time.time()
    
 print('Done in {:.4f} seconds'.format(toc1-tic1))

如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。

扫码二维码加入Web技术交流群

发布评论

需要 登录 才能够评论, 你可以免费 注册 一个本站的账号。

评论(1

一张白纸 2025-01-16 03:36:44

除了无意义的数据之外,代码上的这个微小变化可以完美运行(如果您忽略合成数据引起的异常),

也许这将帮助您了解哪里出了问题。

from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import pandas as pd
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import warnings

def kmeans(n):
    data = ['John', 'Paul', 'George', 'Ringo']
    ss = []
    print(f"Process {n} is executing")
    try:
        sc = StandardScaler()
        X = sc.fit_transform(pd.get_dummies(data, drop_first=True))
        km = KMeans(n_clusters=n, random_state=1)
        km = km.fit(X)
        ss.append(km.inertia_)
    except Exception:
        pass
    print(f"Process {n} is ending")
    return ss


def main():
    warnings.filterwarnings('ignore')
    with ProcessPoolExecutor() as executor:
        executor.map(kmeans, list(range(50)))


if __name__ == '__main__':
    main()

Apart from the nonsensical data this minor variation on your code runs perfectly (if you ignore the exceptions arising from the synthetic data)

Perhaps this will help you to see where you've gone wrong.

from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import pandas as pd
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import warnings

def kmeans(n):
    data = ['John', 'Paul', 'George', 'Ringo']
    ss = []
    print(f"Process {n} is executing")
    try:
        sc = StandardScaler()
        X = sc.fit_transform(pd.get_dummies(data, drop_first=True))
        km = KMeans(n_clusters=n, random_state=1)
        km = km.fit(X)
        ss.append(km.inertia_)
    except Exception:
        pass
    print(f"Process {n} is ending")
    return ss


def main():
    warnings.filterwarnings('ignore')
    with ProcessPoolExecutor() as executor:
        executor.map(kmeans, list(range(50)))


if __name__ == '__main__':
    main()
~没有更多了~
我们使用 Cookies 和其他技术来定制您的体验包括您的登录状态等。通过阅读我们的 隐私政策 了解更多相关信息。 单击 接受 或继续使用网站,即表示您同意使用 Cookies 和您的相关数据。
原文