My previous C++ server started life as a PC client-game server and was later carried over to mobile. Through continuous optimization it migrated from C++98 to C++11 and from 32-bit to 64-bit, and after the move from PC to mobile it handled 10,000+ concurrent players per server without much strain.

I picked up Go in my spare time. In today's environment, high concurrency and cross-platform support are the mainstream requirements for game servers. Honestly, the old C++ stack was good enough; as long as you can reach that scale, nothing beats C++ for squeezing out single-server performance.

skynet + Lua means a C core with Lua for game logic. I have also worked with a C++ core and Lua logic, but in practice Lua is not something I want to write; perhaps Lua scripting simply doesn't interest me. What Go does is broadly similar to skynet: both are built around coroutines with a preemptive scheduling model. Go gives you goroutines and channels, while skynet runs Lua VMs.
When I designed the Go version, I departed somewhat from the big C++ wheel: C++ follows a main-loop (tick) model, and its single-threaded logic throughput speaks for itself. For the Go design I chose the actor model instead, with every role fully independent of the others, and goroutines and channels made that work. Across the whole server, locks are avoided wherever possible; there are only one or two places that use one at all. The actor model deserves the credit: each role has its own message queue that receives and processes its messages.
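Roughly how one of those per-role actors might look. This is a minimal sketch of the pattern, not code from the repository; the Actor/Message names and the "damage"/"heal" operations are made up for illustration.

```go
package main

import "fmt"

// Message is a hypothetical envelope pushed into an actor's mailbox.
type Message struct {
	Op   string
	Data int
}

// Actor owns its state exclusively: only its own goroutine reads or
// writes hp, so no lock is needed.
type Actor struct {
	mailbox chan Message
	done    chan struct{}
	hp      int
}

func NewActor() *Actor {
	a := &Actor{mailbox: make(chan Message, 128), done: make(chan struct{}), hp: 100}
	go a.loop() // one goroutine per role, draining its own queue
	return a
}

func (a *Actor) loop() {
	for msg := range a.mailbox {
		switch msg.Op {
		case "damage":
			a.hp -= msg.Data
		case "heal":
			a.hp += msg.Data
		}
		fmt.Println("hp:", a.hp)
	}
	close(a.done)
}

// Send is the only way other goroutines interact with this actor.
func (a *Actor) Send(m Message) { a.mailbox <- m }

// Stop closes the mailbox and waits until everything queued is handled.
func (a *Actor) Stop() {
	close(a.mailbox)
	<-a.done
}

func main() {
	player := NewActor()
	player.Send(Message{Op: "damage", Data: 30})
	player.Send(Message{Op: "heal", Data: 10})
	player.Stop()
}
```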
The main network model is RPC, but not a third-party RPC framework; it is my own. There is no need to define message bodies the way protobuf does: the argument types alone are enough, thanks to reflect, one of Go's magical features that pays off handsomely when used well. In C++ the same thing can only be done with macros or templates. Of course, client-server communication also supports protobuf.
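To illustrate the idea, here is a rough reflect-based dispatcher in which registering a handler function is the only "message definition" needed. The Register/Call names and the example handler are assumptions for this sketch, not the project's actual API.

```go
package main

import (
	"fmt"
	"reflect"
)

// handlers maps an RPC name to a registered handler function.
var handlers = map[string]reflect.Value{}

// Register stores any function under a name; its parameter types act
// as the message definition, so no .proto file is required.
func Register(name string, fn interface{}) {
	handlers[name] = reflect.ValueOf(fn)
}

// Call looks up a handler and invokes it through reflection with the
// decoded arguments.
func Call(name string, args ...interface{}) {
	fn, ok := handlers[name]
	if !ok {
		fmt.Println("no handler registered for", name)
		return
	}
	in := make([]reflect.Value, len(args))
	for i, a := range args {
		in[i] = reflect.ValueOf(a)
	}
	fn.Call(in)
}

func main() {
	Register("Player.Move", func(id int64, x, y float32) {
		fmt.Printf("player %d moves to (%.1f, %.1f)\n", id, x, y)
	})
	// In a real server the name and arguments would be decoded off the
	// wire; here the dispatcher is called directly.
	Call("Player.Move", int64(1001), float32(3.5), float32(7.0))
}
```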
![image.png](https://static.studygolang.com/180113/aad06eeec4c428b80ac82d46a4cff3af)
![screenshot.png](https://static.studygolang.com/180113/79ed7521b38914c838c02d31416462d7.png)
Addendum 1 ·
https://github.com/bobohume/go-server
Addendum 2 ·
The source code for the Go server is here: https://github.com/bobohume/go-server. The snippet below is a throughput benchmark that measures opsPerSecond first for github.com/smartystreets/go-disruptor and then for a buffered channel.
package main

import (
	"fmt"
	"runtime"
	"sync"
	"time"

	"github.com/smartystreets/go-disruptor"
)

const (
	RingBufferSize   = 1024 * 64
	RingBufferMask   = RingBufferSize - 1
	ReserveOne       = 1
	ReserveMany      = 16
	ReserveManyDelta = ReserveMany - 1
	DisruptorCleanup = time.Millisecond * 10
)

var ringBuffer = [RingBufferSize]int64{}
var num = 5 // number of publisher goroutines
func main() {
	NumPublishers := num // runtime.NumCPU()
	totalIterations := int64(1000 * 1000 * 20)
	iterations := totalIterations / int64(NumPublishers)
	totalIterations = iterations * int64(NumPublishers)
	fmt.Printf("Total: %d, Iterations: %d, Publisher: %d, Consumer: 1\n", totalIterations, iterations, NumPublishers)
	runtime.GOMAXPROCS(NumPublishers)

	var consumer = &countConsumer{TotalIterations: totalIterations, Count: 0}
	consumer.WG.Add(1)

	controller := disruptor.Configure(RingBufferSize).WithConsumerGroup(consumer).BuildShared()
	controller.Start()
	defer controller.Stop()

	var wg sync.WaitGroup
	wg.Add(NumPublishers + 1)
	var sendWG sync.WaitGroup
	sendWG.Add(NumPublishers)

	for i := 0; i < NumPublishers; i++ {
		go func() {
			writer := controller.Writer()
			wg.Done()
			wg.Wait() // barrier: all publishers start at the same time
			current1 := disruptor.InitialSequenceValue
			for current1 < iterations {
				current := writer.Reserve(1)
				ringBuffer[current&RingBufferMask] = current
				writer.Commit(current, current)
				current1++
			}
			sendWG.Done()
		}()
	}

	wg.Done()
	t := time.Now().UnixNano()
	wg.Wait() // waiting for ready as a barrier
	fmt.Println("start to publish")
	sendWG.Wait()
	fmt.Println("Finished to publish")
	consumer.WG.Wait()
	fmt.Println("Finished to consume") // waiting for consumer
	t = (time.Now().UnixNano() - t) / 1000000 // ms
	fmt.Printf("opsPerSecond: %d\n", totalIterations*1000/t)

	main1()
}
type countConsumer struct {
	Count           int64
	TotalIterations int64
	WG              sync.WaitGroup
}

// Consume is called by the disruptor with a range of committed sequences.
func (cc *countConsumer) Consume(lower, upper int64) {
	for lower <= upper {
		message := ringBuffer[lower&RingBufferMask]
		if message != lower {
			warning := fmt.Sprintf("\nRace condition--Sequence: %d, Message: %d\n", lower, message)
			fmt.Print(warning)
			panic(warning)
		}
		lower++
		cc.Count++
		//fmt.Printf("count: %d, message: %d\n", cc.Count-1, message)
		if cc.Count == cc.TotalIterations {
			cc.WG.Done()
			return
		}
	}
}
// main1 runs the same publish/consume workload over a buffered channel
// for comparison with the disruptor above.
func main1() {
	NumPublishers := num // runtime.NumCPU()
	totalIterations := int64(1000 * 1000 * 20)
	iterations := totalIterations / int64(NumPublishers)
	totalIterations = iterations * int64(NumPublishers)

	channel := make(chan int64, 1024*64)

	var wg sync.WaitGroup
	wg.Add(NumPublishers + 1)
	var readerWG sync.WaitGroup
	readerWG.Add(1)

	for i := 0; i < NumPublishers; i++ {
		go func() {
			wg.Done()
			wg.Wait() // barrier: all publishers start at the same time
			for i := int64(0); i < iterations; i++ {
				channel <- i
			}
		}()
	}

	go func() {
		for i := int64(0); i < totalIterations; i++ {
			msg := <-channel
			if NumPublishers == 1 && msg != i {
				//panic("Out of sequence")
			}
		}
		readerWG.Done()
	}()

	wg.Done()
	t := time.Now().UnixNano()
	wg.Wait()
	readerWG.Wait()
	t = (time.Now().UnixNano() - t) / 1000000 // ms
	fmt.Printf("opsPerSecond: %d\n", totalIterations*1000/t)
}