Ceph启动失败:ERROR: unable to open OSD superblock

发布于 2021-12-03 19:47:02 字数 5393 浏览 1050 评论 9

错误信息如下,而且/data/osd.00/下也确实没有自动生成superblock文件

# /etc/init.d/ceph start osd.00

=== osd.00 === 
Mounting Btrfs on ceph00:/data/osd.00
Scanning for Btrfs filesystems
create-or-move updated item name 'osd.0' weight 0.02 at location {host=ceph00,root=default} to crush map
Starting Ceph osd.00 on ceph00...
2014-04-30 16:32:39.958530 7f8904e9a800 -1  ** ERROR: unable to open OSD superblock on /data/osd.00: (2) No such file or directory
failed: 'ulimit -n 131072;  /usr/local/ceph/bin/ceph-osd -i 00 --pid-file /var/run/ceph/osd.00.pid -c /usr/local/ceph/etc/ceph/ceph.conf '


我的ceph.conf文件内容如下:

;
; Sample ceph ceph.conf file.
;
; This file defines cluster membership, the various locations
; that Ceph stores data, and any other runtime options.

; If a 'host' is defined for a daemon, the init.d start/stop script will
; verify that it matches the hostname (or else ignore it).  If it is
; not defined, it is assumed that the daemon is intended to start on
; the current host (e.g., in a setup with a startup.conf on each
; node).

; The variables $type, $id and $name are available to use in paths
; $type = The type of daemon, possible values: mon, mds and osd
; $id = The ID of the daemon, for mon.alpha, $id will be alpha
; $name = $type.$id

; For example:
; osd.0
;  $type = osd
;  $id = 0
;  $name = osd.0

; mon.beta
;  $type = mon
;  $id = beta
;  $name = mon.beta

; global
[global]
	; enable secure authentication
	auth supported = cephx

        ; allow ourselves to open a lot of files
        max open files = 131072

        ; set log file
        log file = /var/log/ceph/$name.log
        ; log_to_syslog = true        ; uncomment this line to log to syslog

        ; set up pid files
        pid file = /var/run/ceph/$name.pid

        ; If you want to run a IPv6 cluster, set this to true. Dual-stack isn't possible
        ;ms bind ipv6 = true

	keyring = /data/keyring.$name

; monitors
;  You need at least one.  You need at least three if you want to
;  tolerate any node failures.  Always create an odd number.
[mon]
        mon data = /data/$name

        ; If you are using for example the RADOS Gateway and want to have your newly created
        ; pools a higher replication level, you can set a default
        ;osd pool default size = 3

        ; You can also specify a CRUSH rule for new pools
        ; Wiki: http://ceph.newdream.net/wiki/Custom_data_placement_with_CRUSH
        ;osd pool default crush rule = 0

        ; Timing is critical for monitors, but if you want to allow the clocks to drift a
        ; bit more, you can specify the max drift.
        ;mon clock drift allowed = 1

        ; Tell the monitor to backoff from this warning for 30 seconds
        ;mon clock drift warn backoff = 30

	; logging, for debugging monitor crashes, in order of
	; their likelihood of being helpful :)
	;debug ms = 1
	;debug mon = 20
	;debug paxos = 20
	;debug auth = 20

[mon.00]
	host = ceph00
	mon addr = 114.212.81.91:6789

#[mon.01]
#	host = ceph01
#	mon addr = 114.212.85.13:6789

#[mon.02]
#	host = ceph02
#	mon addr = 114.212.87.228:6789

; mds
;  You need at least one.  Define two to get a standby.
[mds]
	; where the mds keeps its secret encryption keys
#	keyring = /data/keyring.$name

	; mds logging to debug issues.
	;debug ms = 1
	;debug mds = 20

[mds.00]
	host = ceph00

#[mds.01]
#	host = ceph01

[mds.02]
	host = ceph02

; osd
;  You need at least one.  Two if you want data to be replicated.
;  Define as many as you like.
[osd]
	; This is where the osd expects its data
	osd data = /data/$name

	; Ideally, make the journal a separate disk or partition.
 	; 1-10GB should be enough; more if you have fast or many
 	; disks.  You can use a file under the osd data dir if need be
 	; (e.g. /data/$name/journal), but it will be slower than a
 	; separate disk or partition.
        ; This is an example of a file-based journal.
	osd journal = /data/$name/journal
	osd journal size = 1000 ; journal size, in megabytes

        ; If you want to run the journal on a tmpfs (don't), disable DirectIO
        ;journal dio = false

        ; You can change the number of recovery operations to speed up recovery
        ; or slow it down if your machines can't handle it
        ; osd recovery max active = 3

	; osd logging to debug osd issues, in order of likelihood of being
	; helpful
	;debug ms = 1
	;debug osd = 20
	;debug filestore = 20
	;debug journal = 20


	; ### The below options only apply if you're using mkcephfs
	; ### and the devs options
        ; The filesystem used on the volumes
        osd mkfs type = btrfs
        ; If you want to specify some other mount options, you can do so.
        ; for other filesystems use 'osd mount options $fstype'
	osd mount options btrfs = rw,noatime
	; The options used to format the filesystem via mkfs.$fstype
        ; for other filesystems use 'osd mkfs options $fstype'
	; osd mkfs options btrfs =

#	keyring = /data/keyring.$name

[osd.00]
	host = ceph00

	; if 'devs' is not specified, you're responsible for
	; setting up the 'osd data' dir.
	devs = /dev/sda8

#[osd.01]
#	host = ceph01
#	devs = /dev/sda8

[osd.02]
	host = ceph02
	devs = /dev/sda8



求助。。。这个问题又卡了一天了。。

如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。

扫码二维码加入Web技术交流群

发布评论

需要 登录 才能够评论, 你可以免费 注册 一个本站的账号。

评论(9)

策马西风 2021-12-08 18:40:38

mount /dev/vdc /var/lib/ceph/osd/ceph-$ID

反目相谮 2021-12-08 18:31:43

mkcephfs后直接ceph -a start也会报这个错

掩饰不了的爱 2021-12-08 18:31:10

mkcephfs 已经不建议使用,ceph-deploy这个工具比较完善

顾忌 2021-12-08 16:09:31

那就每次启动Ceph之前,运行mkcephfs,麻烦写成脚本,莫非Ceph没有集成这个功能~不懂~

回眸一笑 2021-12-08 15:49:40

额什么叫独立于系统的分区?sda8是我用fdisk命令建的分区。。

天涯离梦残月幽梦 2021-12-08 13:34:06
52 [osd.00]
白龙吟 2021-12-08 05:06:51

改成none了,还是同样的错误啊。。估计我这里还没有开始涉及到权限问题呢,找不到superblock可能是其他错误。谢谢~

顾忌 2021-12-06 11:28:02

估计是这里有问题:

[global]

各自安好 2021-12-04 06:17:02

初始化之后 在mon节点 service ceph -a start 就好,为毛单独启动一个节点?

~没有更多了~
我们使用 Cookies 和其他技术来定制您的体验包括您的登录状态等。通过阅读我们的 隐私政策 了解更多相关信息。 单击 接受 或继续使用网站,即表示您同意使用 Cookies 和您的相关数据。
原文