Commit 6a376dbb authored by Sushant Mahajan

added some persistence information and made entries in raft data structure

parent 9bbecccb
 package raft

 import (
+	"io/ioutil"
 	"log"
 	"math/rand"
 	"net"
-	"net/rpc"
+	"os"
 	"strconv"
 	"sync"
 	"time"
@@ -18,13 +19,10 @@ const (
 	MIN_TIMEOUT = 300
 	MAX_TIMEOUT = 500
 	LEADER      = iota
-	CANDIDATE   = iota
-	FOLLOWER    = iota
+	CANDIDATE
+	FOLLOWER
 )

-// Logger
-var Info *log.Logger
-
 // Global variable for generating unique log sequence numbers
 var lsn Lsn
@@ -51,24 +49,24 @@ type ClusterConfig struct {
 	Servers []ServerConfig // All servers in this cluster
 }

-type ClientAppend struct{
+type ClientAppend struct {
 	logEntry LogEntry
 }

-type VoteRequest struct{
+type VoteRequest struct {
+	term         int
+	candidateId  int
+	lastLogIndex int
+	lastLogTerm  int
 }

-type AppendRPC struct{
+type AppendRPC struct {
 }

-type Timeout struct{
+type Timeout struct {
 }

-type RaftEvent interface{
+type RaftEvent interface {
 }

 type SharedLog interface {
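Because RaftEvent is an empty interface, each of these event structs satisfies it implicitly, so all of them can travel over the same event channel. An illustrative fragment (mine, not the commit's) from elsewhere in the package:

// Illustrative only: feeding events to a raft instance.
// Any type satisfies the empty RaftEvent interface.
func feedEvents(rft *Raft) {
	rft.eventCh <- &VoteRequest{term: 2, candidateId: 3, lastLogIndex: 0, lastLogTerm: 0}
	rft.eventCh <- &Timeout{}
}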
@@ -83,7 +81,10 @@ type Raft struct {
 	clusterConfig *ClusterConfig // Cluster
 	id            int            // Server id
 	sync.RWMutex
+	Info    *log.Logger    //log for raft instance
 	eventCh chan RaftEvent //receive events related to various states
+	votedFor    int
+	currentTerm int
 }

 // Log entry interface
@@ -101,31 +102,41 @@ type LogEntryData struct {
 	conn net.Conn // Connection for communicating with client
 }

-// Structure for calling commit RPC
-type CommitData struct {
-	Id Lsn
-}
-
-// Structure used for replying to the RPC calls
-type Reply struct {
-	X int
-}
-
-// Structure for registering RPC methods
-type AppendEntries struct{}
+func getCurrentTerm(serverId int, info *log.Logger) int {
+	if file, err := os.Open("currentTerm" + strconv.Itoa(serverId)); err != nil {
+		ioutil.WriteFile("currentTerm"+strconv.Itoa(serverId), []byte("0"), 0666)
+		info.Println("wrote in term file:0")
+		return 0
+	} else {
+		if data, err := ioutil.ReadFile(file.Name()); err != nil {
+			info.Println("error reading file")
+			return -1
+		} else {
+			info.Println("read from file")
+			if t, err2 := strconv.Atoi(string(data)); err2 != nil {
+				info.Println("error converting")
+				return -1
+			} else {
+				info.Println("Converted success", t)
+				return t
+			}
+		}
+		return -1
+	}
+}

 // Creates a raft object. This implements the SharedLog interface.
 // commitCh is the channel that the kvstore waits on for committed messages.
 // When the process starts, the local disk log is read and all committed
 // entries are recovered and replayed
-func NewRaft(config *ClusterConfig, thisServerId int, commitCh chan LogEntry, eventCh chan RaftEvent, logger *log.Logger) (*Raft, error) {
+func NewRaft(config *ClusterConfig, thisServerId int, commitCh chan LogEntry, eventCh chan RaftEvent, toDebug bool) (*Raft, error) {
 	rft := new(Raft)
 	rft.commitCh = commitCh
 	rft.clusterConfig = config
 	rft.id = thisServerId
-	Info = logger
-	lsn = 0
 	rft.eventCh = eventCh
+	rft.Info = getLogger(thisServerId, toDebug)
+	rft.currentTerm = getCurrentTerm(thisServerId, rft.Info)
 	return rft, nil
 }
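getCurrentTerm covers only the read side of term persistence; nothing in this commit writes the term back when it changes. One possible write-side counterpart, a sketch of mine (setCurrentTerm is a hypothetical name reusing the same file layout):

// Hypothetical companion to getCurrentTerm; not part of this commit.
// Persists the term so a restarted server does not reuse an old term.
func setCurrentTerm(serverId int, term int, info *log.Logger) error {
	name := "currentTerm" + strconv.Itoa(serverId)
	if err := ioutil.WriteFile(name, []byte(strconv.Itoa(term)), 0666); err != nil {
		info.Println("error writing term file:", err)
		return err
	}
	info.Println("persisted term", term)
	return nil
}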
@@ -142,52 +153,6 @@ func NewLogEntry(data []byte, committed bool, conn net.Conn) *LogEntryData {
 	return entry
 }

-// Goroutine that monitors channel to check if the majority of servers have replied
-func monitorAckChannel(rft *Raft, ack_ch <-chan int, log_entry LogEntry, majCh chan bool) {
-	acks_received := 0
-	num_servers := len(rft.clusterConfig.Servers)
-	required_acks := num_servers / 2
-	up := make(chan bool, 1)
-	err := false
-	go func() {
-		time.Sleep(ACK_TIMEOUT * time.Second)
-		up <- true
-	}()
-
-	for {
-		select {
-		case temp := <-ack_ch:
-			Info.Println("Ack Received:", temp)
-			acks_received += temp
-			if acks_received == required_acks {
-				Info.Println("Majority Achieved", log_entry.(*LogEntryData).Id)
-				rft.LogArray[log_entry.(*LogEntryData).Id].Committed = true
-				//Info.Println(rft.LogArray)
-				rft.commitCh <- log_entry
-				temp := new(CommitData)
-				temp.Id = log_entry.(*LogEntryData).Id
-				for _, server := range rft.clusterConfig.Servers[1:] {
-					go doCommitRPCCall(server.Hostname, server.LogPort, temp)
-				}
-				majCh <- true
-				err = true
-				break
-			}
-		case <-up:
-			Info.Println("Error")
-			err = true
-			break
-		}
-		if err {
-			break
-		}
-	}
-}
-
 // Gets the Lsn
 func (entry *LogEntryData) GetLsn() Lsn {
 	return entry.Id
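A note on the majority arithmetic the deleted monitorAckChannel relied on (and that any replacement will need): the leader's own copy of the entry counts toward the quorum, so num_servers/2 follower acks plus the leader reach the floor(n/2)+1 majority. A small sketch, mine rather than the commit's:

// Why num_servers/2 follower acks implied a majority: the leader's own
// copy counts too, so acks+1 >= floor(n/2)+1 whenever acks >= n/2.
func majorityReached(numServers, followerAcks int) bool {
	majority := numServers/2 + 1      // e.g. 3 of 5
	return followerAcks+1 >= majority // +1 for the leader itself
}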
@@ -208,39 +173,9 @@ func (entry *LogEntryData) SetCommitted(committed bool) {
 	entry.Committed = committed
 }

-// Call CommitRPC to inform the followers of newly committed log entry
-func doCommitRPCCall(hostname string, logPort int, temp *CommitData) {
-	Info.Println("Commit RPC")
-	client, err := rpc.Dial("tcp", hostname+":"+strconv.Itoa(logPort))
-	if err != nil {
-		Info.Fatal("Dialing:", err)
-	}
-	reply := new(Reply)
-	args := temp
-	Info.Println("Calling Commit RPC", logPort)
-	commitCall := client.Go("AppendEntries.CommitRPC", args, reply, nil) //let go allocate done channel
-	commitCall = <-commitCall.Done
-	Info.Println("Reply", commitCall, reply.X)
-}
-
-//make rpc call to followers
-func doRPCCall(ackChan chan int, hostname string, logPort int, temp *LogEntryData) {
-	client, err := rpc.Dial("tcp", hostname+":"+strconv.Itoa(logPort))
-	if err != nil {
-		Info.Fatal("Dialing:", err)
-	}
-	reply := new(Reply)
-	args := temp
-	Info.Println("RPC Called", logPort)
-	appendCall := client.Go("AppendEntries.AppendEntriesRPC", args, reply, nil) //let go allocate done channel
-	appendCall = <-appendCall.Done
-	Info.Println("Reply", appendCall, reply.X)
-	ackChan <- reply.X
-}
-
 //make raft implement the append function
 func (rft *Raft) Append(data []byte, conn net.Conn) (LogEntry, error) {
-	Info.Println("Append Called")
+	rft.Info.Println("Append Called")
 	if rft.id != 1 {
 		return nil, ErrRedirect(1)
 	}
@@ -250,24 +185,12 @@ func (rft *Raft) Append(data []byte, conn net.Conn) (LogEntry, error) {
 	rft.LogArray = append(rft.LogArray, temp)

-	ackChan := make(chan int)
-	majChan := make(chan bool)
-	go monitorAckChannel(rft, ackChan, temp, majChan)
-
-	for _, server := range rft.clusterConfig.Servers[1:] {
-		go doRPCCall(ackChan, server.Hostname, server.LogPort, temp)
-	}
-
-	if <-majChan {
-		//
-	}
-
 	return temp, nil
 }

 //AddToChannel
 func (rft *Raft) AddToChannel(entry LogEntry) {
-	Info.Println("Adding to commit", entry)
+	rft.Info.Println("Adding to commit", entry)
 	rft.commitCh <- entry
 }
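Append refuses work on any server other than the hard-coded leader (id 1) and returns ErrRedirect(1). A sketch of a caller honoring that contract; tryAppend is a hypothetical name, not in the commit:

// Illustrative caller (not in the commit): only server 1 accepts
// appends here; other ids get an ErrRedirect naming the leader.
func tryAppend(rft *Raft, data []byte, conn net.Conn) {
	entry, err := rft.Append(data, conn)
	if err != nil {
		fmt.Println(err.Error()) // redirect-style message from ErrRedirect
		return
	}
	fmt.Println("appended entry with lsn", entry.GetLsn())
}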
@@ -298,53 +221,53 @@ func (e ErrRedirect) Error() string {
 }

 //entry loop to raft
-func (raft *Raft) loop() {
+func (rft *Raft) loop() {
 	state := FOLLOWER
 	for {
+		rft.Info.Println("hello")
 		switch state {
 		case FOLLOWER:
-			state = follower()
-		case CANDIDATE:
-			state = candidate()
-		case LEADER:
-			state = leader()
+			state = rft.follower()
+		// case CANDIDATE:
+		// 	state = candidate()
+		// case LEADER:
+		// 	state = leader()
 		default:
 			return
 		}
 	}
 }

-func (raft *Raft) follower() {
+func getTimer() *time.Timer {
+	return time.NewTimer(time.Millisecond * time.Duration((rand.Intn(MAX_TIMEOUT)+MIN_TIMEOUT)%MAX_TIMEOUT))
+}
+
+func (rft *Raft) follower() int {
 	//start candidate timeout
-	isCandidateChan = time.After((rand.Intn(MAX_TIMEOUT) + MIN_TIMEOUT) % MAX_TIMEOUT)
+	candTimer := getTimer()
 	for {
 		//wrap in select
-		event := <-raft.eventCh
-		switch event.(type) {
-		case *ClientAppend:
-			// Do not handle clients in follower mode. Send it back up the
-			// pipe with committed = false
-			event.(*LogEntry).SetCommitted(false)
-			raft.commitCh <- event.(*LogEntry)
-		case *VoteRequest:
-			msg = event.msg
-			if msg.term < currentterm, respond with
-			if msg.term > currentterm, upgrade currentterm
-			if not already voted in my term
-				reset timer
-				reply ok to event.msg.serverid
-				remember term, leader id (either in log or in separate file)
-		case *AppendRPC:
-			reset timer
-			if msg.term < currentterm, ignore
-			reset heartbeat timer
-			upgrade to event.msg.term if necessary
-			if prev entries of my log and event.msg match
-				add to disk log
-				flush disk log
-				respond ok to event.msg.serverid
-			else
-				respond err.
-		case *Timeout:
-			return candidate // new state back to loop()
+		select {
+		case <-candTimer.C:
+			return CANDIDATE
+		case event := <-rft.eventCh:
+			switch event.(type) {
+			case *ClientAppend:
+				// Do not handle clients in follower mode. Send it back up the
+				// pipe with committed = false
+				event.(*ClientAppend).logEntry.SetCommitted(false)
+				rft.commitCh <- event.(*ClientAppend).logEntry
+			case *VoteRequest:
+				req := event.(*VoteRequest)
+				if req.term < rft.currentTerm {
+					//reply as - not accepted as leader
+				}
+				if req.term > rft.currentTerm {
+					//update currentTerm
+				}
+				//condition for - if not voted in current term
+			}
 		}
 	}
 }
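Two notes on the new follower(). First, the timeout expression (rand.Intn(MAX_TIMEOUT)+MIN_TIMEOUT)%MAX_TIMEOUT can wrap below MIN_TIMEOUT (e.g. 300+250 = 550, and 550%500 = 50ms); MIN_TIMEOUT+rand.Intn(MAX_TIMEOUT-MIN_TIMEOUT) would keep the range at 300-500ms. Second, the empty branches in the VoteRequest case still need the grant/deny logic from the deleted pseudocode; one possible shape, omitting the log up-to-date check and disk persistence (handleVoteRequest is a hypothetical helper, not in the commit):

// Hypothetical helper, not in the commit: decide a RequestVote and
// remember the vote. Persisting currentTerm/votedFor is left out.
func (rft *Raft) handleVoteRequest(req *VoteRequest) (granted bool) {
	if req.term < rft.currentTerm {
		return false // stale candidate; do not accept as leader
	}
	if req.term > rft.currentTerm {
		rft.currentTerm = req.term // upgrade term; should also go to disk
		rft.votedFor = -1          // fresh term, no vote cast yet
	}
	if rft.votedFor == -1 || rft.votedFor == req.candidateId {
		rft.votedFor = req.candidateId // remember vote for this term
		return true
	}
	return false
}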
// server.go
-package main
+package raft

 import (
+	"fmt"
 	"io/ioutil"
 	"log"
-	"math/rand"
 	"os"
-	"raft"
 	"strconv"
-	"time"
 )

-// Logger
-var Info *log.Logger
-
-//global raft object for each server instance
-var rft *raft.Raft
+var rafts map[int]*Raft

-//Simple logger that is enabled or disabled according to the command line arguments. In test cases
-//it is redirected to a file per server {1..5}.
-//arguments: current server id, toggle enable/disable
-//return: none
-//receiver: none
-func initLogger(serverId int, toDebug bool) {
-	// Logger Initialization
+func getLogger(serverId int, toDebug bool) (l *log.Logger) {
 	if !toDebug {
-		Info = log.New(ioutil.Discard, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile)
+		l = log.New(ioutil.Discard, "INFO: ", log.Ltime|log.Lshortfile)
 	} else {
-		Info = log.New(os.Stdout, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile)
+		logf, _ := os.OpenFile(strconv.Itoa(serverId), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+		l = log.New(logf, "INFO: ", log.Ltime|log.Lshortfile)
 	}
-	Info.Println("Initialized server.")
+	l.Println("Initialized server.")
+	return l
 }

-//Entry point for application. Starts all major server go routines and then waits forever
-func main() {
-	rand.Seed(time.Now().UnixNano())
-	sid, err := strconv.Atoi(os.Args[1])
-	if err != nil {
-		Info.Println("argument ", os.Args[1], "is not a number")
-	}
-	if len(os.Args) > 3 {
-		initLogger(sid, true)
-	} else {
-		initLogger(sid, false)
-	}
-	Info.Println("Starting")
-	serverCount, err2 := strconv.Atoi(os.Args[2])
-	if err2 != nil {
-		Info.Println("argument ", os.Args[2], "is not a number")
-	}
-	server, _ := raft.NewServerConfig(sid)
-	clusterConfig, _ := raft.NewClusterConfig(serverCount)
-	commitCh := make(chan raft.LogEntry)
-	rft, _ = raft.NewRaft(clusterConfig, sid, commitCh, Info)
-}
+func Start(serverId int, commitCh chan LogEntry, eventCh chan RaftEvent, dummyCh chan bool, toDebug bool) {
+	clusterConfig, _ := NewClusterConfig(5)
+	rft, _ := NewRaft(clusterConfig, serverId, commitCh, eventCh, toDebug)
+	if rafts == nil {
+		rafts = make(map[int]*Raft)
+	}
+	rafts[serverId] = rft
+	fmt.Println(len(rafts))
+	rft.loop()
+}
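Since Start blocks in rft.loop(), a harness would spawn one goroutine per server. A sketch under the assumption of a five-server cluster (launchCluster is hypothetical, not part of the commit):

// Hypothetical launcher, e.g. in a test; not part of the commit.
func launchCluster() {
	for id := 1; id <= 5; id++ {
		commitCh := make(chan LogEntry)
		eventCh := make(chan RaftEvent)
		dummyCh := make(chan bool)
		go Start(id, commitCh, eventCh, dummyCh, true) // Start blocks in rft.loop()
	}
}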